コード例 #1
0
ファイル: scheduler.py プロジェクト: MariaAbrahms/otter
 def stopService(self):
     """
     Stop the timer service and, if this node currently holds a bucket
     partition, release it.

     :return: the Deferred from finishing the partition when one is held,
         otherwise ``None``.
     """
     TimerService.stopService(self)
     partition = self.kz_partition
     if not partition.acquired:
         return None
     return partition.finish()
コード例 #2
0
 def test_pickleTimerServiceNotPickleLoopFinished(self):
     """
     Pickling a started L{internet.TimerService} must not include the
     L{internet.TimerService._loopFinished} attribute in the pickle.
     """
     # self.timer wraps an unpicklable target, so build a fresh service
     # around a module-level (picklable) callable instead.
     service = TimerService(1, fakeTargetFunction)
     service.startService()
     pickled = pickle.dumps(service)
     service.stopService()
     restored = pickle.loads(pickled)
     sentinel = object()
     self.assertIdentical(
         sentinel, getattr(restored, "_loopFinished", sentinel))
コード例 #3
0
 def stopService(self):
     """
     Close the log file (when one is open) and stop the underlying timer.

     :return: a Deferred firing when the service has stopped.
     """
     if self.log_file is not None:
         self.log_file.close()
         self.log_file = None
         return TimerService.stopService(self)
     # Nothing was open; report an already-stopped service.
     return defer.succeed(None)
コード例 #4
0
ファイル: service.py プロジェクト: ra2003/txacme
class AcmeIssuingService(Service):
    """
    A service for keeping certificates up to date by using an ACME server.

    :type cert_store: `~txacme.interfaces.ICertificateStore`
    :param cert_store: The certificate store containing the certificates to
        manage.

    :type client: `txacme.client.Client`
    :param client: A client which is already set to be used for an
        environment.  For example, ``Client.from_url(reactor=reactor,
        url=LETSENCRYPT_STAGING_DIRECTORY, key=acme_key, alg=RS256)``.
        When the service is stopped, it will automatically call the stop
        method on the client.

    :param clock: ``IReactorTime`` provider; usually the reactor, when not
        testing.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: Challenge responders.  Usually only one responder is
        needed; if more than one responder for the same type is provided, only
        the first will be used.
    :param str email: An (optional) email address to use during registration.
    :param ~datetime.timedelta check_interval: How often to check for expiring
        certificates.
    :param ~datetime.timedelta reissue_interval: If a certificate is expiring
        in less time than this interval, it will be reissued.
    :param ~datetime.timedelta panic_interval: If a certificate is expiring in
        less time than this interval, and reissuing fails, the panic callback
        will be invoked.

    :type panic: Callable[[Failure, `str`], Deferred]
    :param panic: A callable invoked with the failure and server name when
        reissuing fails for a certificate expiring in the ``panic_interval``.
        For example, you could generate a monitoring alert.  The default
        callback logs a message at *CRITICAL* level.
    :param generate_key: A 0-arg callable used to generate a private key for a
        new cert.  Normally you would not pass this unless you have specialized
        key generation requirements.
    """
    # attrs-managed constructor: public configuration first, private
    # collaborators prefixed with "_" (attrs strips the underscore from the
    # corresponding __init__ argument name).
    cert_store = attr.ib()
    _client = attr.ib()
    _clock = attr.ib()
    _responders = attr.ib()
    _email = attr.ib(default=None)
    check_interval = attr.ib(default=timedelta(days=1))
    reissue_interval = attr.ib(default=timedelta(days=30))
    panic_interval = attr.ib(default=timedelta(days=15))
    _panic = attr.ib(default=_default_panic)
    _generate_key = attr.ib(default=partial(generate_private_key, u'rsa'))

    # Deferreds handed out by when_certs_valid(), fired once the initial
    # check completes (see done_panicing below).
    _waiting = attr.ib(default=attr.Factory(list), init=False)
    # Map of server_name -> (issuing Deferred, [waiting Deferreds]) used to
    # deduplicate concurrent issue_cert() calls for the same name.
    _issuing = attr.ib(default=attr.Factory(dict), init=False)
    # True once the initial certificate check has completed.
    ready = False

    def _now(self):
        """
        Get the current time.
        """
        return clock_now(self._clock)

    def _check_certs(self):
        """
        Check all of the certs in the store, and reissue any that are expired
        or close to expiring.
        """
        log.info('Starting scheduled check for expired certificates.')

        def check(certs):
            # Partition stored names into "panicing" (expired or about to
            # expire) and "expiring" (due for routine reissue).
            panicing = set()
            expiring = set()
            for server_name, objects in certs.items():
                # An entry with no stored objects is treated as overdue.
                if len(objects) == 0:
                    panicing.add(server_name)
                for o in filter(lambda o: isinstance(o, Certificate), objects):
                    cert = x509.load_pem_x509_certificate(
                        o.as_bytes(), default_backend())
                    until_expiry = cert.not_valid_after - self._now()
                    if until_expiry <= self.panic_interval:
                        panicing.add(server_name)
                    elif until_expiry <= self.reissue_interval:
                        expiring.add(server_name)

            log.info(
                'Found {panicing_count:d} overdue / expired and '
                '{expiring_count:d} expiring certificates.',
                panicing_count=len(panicing),
                expiring_count=len(expiring))

            # Failures for "panicing" names go to the panic callback, while
            # routine reissue failures are only logged.  NOTE(review): the
            # panic branch calls _issue_cert directly (no deduplication)
            # while the expiring branch goes through issue_cert — presumably
            # intentional so panics are never coalesced; confirm.
            d1 = (gatherResults([
                self._issue_cert(server_name).addErrback(
                    self._panic, server_name) for server_name in panicing
            ],
                                consumeErrors=True).addCallback(done_panicing))
            d2 = gatherResults([
                self.issue_cert(server_name).addErrback(lambda f: log.failure(
                    u'Error issuing certificate for: {server_name!r}',
                    f,
                    server_name=server_name)) for server_name in expiring
            ],
                               consumeErrors=True)
            return gatherResults([d1, d2], consumeErrors=True)

        # done_panicing is referenced inside check above; it is resolved at
        # call time, after this function body has defined it.
        def done_panicing(ignored):
            # Initial check finished: release all when_certs_valid() waiters.
            self.ready = True
            for d in list(self._waiting):
                d.callback(None)
            self._waiting = []

        return (self._ensure_registered().addCallback(
            lambda _: self.cert_store.as_dict()).addCallback(
                check).addErrback(lambda f: log.failure(
                    u'Error in scheduled certificate check.', f)))

    def issue_cert(self, server_name):
        """
        Issue a new cert for a particular name.

        If an existing cert exists, it will be replaced with the new cert.  If
        issuing is already in progress for the given name, a second issuing
        process will *not* be started.

        :param str server_name: The name to issue a cert for.

        :rtype: ``Deferred``
        :return: A deferred that fires when issuing is complete.
        """
        def finish(result):
            # Fan the outcome (success value or Failure) out to every caller
            # that piled onto this issuance.
            _, waiting = self._issuing.pop(server_name)
            for d in waiting:
                d.callback(result)

        # d_issue is assigned below, in the conditional, since we may be
        # creating it or using the existing one.
        d = Deferred(lambda _: d_issue.cancel())
        if server_name in self._issuing:
            # Issuance already in flight for this name; just wait on it.
            d_issue, waiting = self._issuing[server_name]
            waiting.append(d)
        else:
            d_issue = self._issue_cert(server_name)
            waiting = [d]
            self._issuing[server_name] = (d_issue, waiting)
            # Add the callback afterwards in case we're using a client
            # implementation that isn't actually async
            d_issue.addBoth(finish)
        return d

    def _issue_cert(self, server_name):
        """
        Issue a new cert for a particular name.

        :param str server_name: The name to issue a cert for.
        :rtype: ``Deferred``
        :return: Fires once the new key, cert and chain have been passed to
            the cert store.
        """
        log.info('Requesting a certificate for {server_name!r}.',
                 server_name=server_name)
        key = self._generate_key()
        # Accumulates the PEM objects (key first, then cert + chain) that
        # will be stored together at the end of the chain below.
        objects = [
            Key(
                key.private_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=serialization.PrivateFormat.TraditionalOpenSSL,
                    encryption_algorithm=serialization.NoEncryption()))
        ]

        def answer_and_poll(authzr):
            # Answer the challenge, then poll until the authorization is
            # valid; stop responding regardless of the poll's outcome.
            def got_challenge(stop_responding):
                return (poll_until_valid(authzr, self._clock,
                                         self._client).addBoth(
                                             tap(lambda _: stop_responding())))

            return (answer_challenge(
                authzr, self._client,
                self._responders).addCallback(got_challenge))

        def got_cert(certr):
            # Convert the DER certificate to PEM and stash it.
            objects.append(
                Certificate(
                    x509.load_der_x509_certificate(
                        certr.body, default_backend()).public_bytes(
                            serialization.Encoding.PEM)))
            return certr

        def got_chain(chain):
            for certr in chain:
                got_cert(certr)
            log.info('Received certificate for {server_name!r}.',
                     server_name=server_name)
            return objects

        return (
            self._client.request_challenges(
                fqdn_identifier(server_name)).addCallback(answer_and_poll).
            addCallback(lambda ign: self._client.request_issuance(
                CertificateRequest(csr=csr_for_names([server_name], key))
            )).addCallback(got_cert).addCallback(
                self._client.fetch_chain).addCallback(got_chain).addCallback(
                    partial(self.cert_store.store, server_name)))

    def _ensure_registered(self):
        """
        Register if needed.

        NOTE(review): ``_registered`` is only initialised in
        ``startService``; calling this before the service is started would
        raise ``AttributeError`` — confirm that is never done.
        """
        if self._registered:
            return succeed(None)
        else:
            return self._register()

    def _register(self):
        """
        Register and agree to the TOS.
        """
        def _registered(regr):
            # Cache the registration so subsequent checks skip this step.
            self._regr = regr
            self._registered = True

        regr = messages.NewRegistration.from_data(email=self._email)
        return (self._client.register(regr).addCallback(
            self._client.agree_to_tos).addCallback(_registered))

    def when_certs_valid(self):
        """
        Get a notification once the startup check has completed.

        When the service starts, an initial check is made immediately; the
        deferred returned by this function will only fire once reissue has been
        attempted for any certificates within the panic interval.

        ..  note:: The reissue for any of these certificates may not have been
            successful; the panic callback will be invoked for any certificates
            in the panic interval that failed reissue.

        :rtype: ``Deferred``
        :return: A deferred that fires once the initial check has resolved.
        """
        if self.ready:
            return succeed(None)
        d = Deferred()
        self._waiting.append(d)
        return d

    def startService(self):
        """
        Start the service and begin the periodic certificate checks
        (an initial check is made immediately; see ``when_certs_valid``).
        """
        Service.startService(self)
        self._registered = False
        self._timer_service = TimerService(self.check_interval.total_seconds(),
                                           self._check_certs)
        self._timer_service.clock = self._clock
        self._timer_service.startService()

    def stopService(self):
        """
        Stop the service: cancel outstanding waiters, stop the ACME client
        and then the periodic check timer.

        :return: Deferred firing once the timer service has stopped.
        """
        Service.stopService(self)
        self.ready = False
        self._registered = False
        # Cancel anyone still waiting on when_certs_valid().
        for d in list(self._waiting):
            d.cancel()
        self._waiting = []

        deferred = self._client.stop()
        # Stop the periodic check only after the client has shut down.
        deferred.addCallback(lambda _: self._timer_service.stopService())
        return deferred
コード例 #5
0
ファイル: bot.py プロジェクト: BackupTheBerlios/pychat-svn
class TehBot(irc.IRCClient):
    """A IRC bot."""

    def connectionMade(self):
        """
        Set up per-connection state once the connection is established.

        Opens the message log (append mode), copies configuration from the
        factory, resets all session state, and runs ``init`` before
        delegating to the superclass.
        """
        # Append to the configured log file; closed again in connectionLost.
        self.logger = MessageLogger(open(self.factory.filename, "a"))
        self.logger.log("[connected at %s]" % asctime(localtime(time())))
        self.options = self.factory.options
        self.svn = self.factory.svn
        self.nickname = self.options.nick
        self.realname = self.options.name
        # Users awaiting WHOIS-based authentication, and those logged in.
        self.authQ = []
        self.loggedIn = []
        # Per-channel topic state plus undo/redo stacks.
        self.topics = {}
        self.redos = {}
        self.undos = {}
        self.userWatch = {}
        self.undo = False
        self.lastSearchStart = -1
        self.lastSearchQuery = None
        self.init()
        # Superclass call last, so the state above exists for any callbacks.
        irc.IRCClient.connectionMade(self)

    def connectionLost(self, reason):
        """Log the disconnect time and close the message log."""
        irc.IRCClient.connectionLost(self, reason)
        self.logger.log("[disconnected at %s]" % asctime(localtime(time())))
        self.logger.close()

    def init(self):
        """
        (Re)load settings that may change while the bot is running, and
        start or stop the SVN-announce timer to match the configuration.
        """
        # set vars that may change during execution
        self.versionName = self.options.options['CLIENTNAME']
        self.versionNum = self.options.options['VERSION']
        self.versionEnv = sys.platform
        # Baseline revision: announcements start from the repo's current HEAD.
        self.options.options['REVISION'] = self.svn.lastRev()
        self.readWatchDataFromFile()

        # SVN announce code
        if self.options.announce:    # if svn announce commits mode
            # create/start timer
            self.timer = TimerService(self.options.frequency, self.svnAnnounce)
            self.timer.startService()
        else:
            # hasattr: self.timer only exists if announce mode ran before.
            if hasattr(self, 'timer') and self.timer.running:  # if running
                self.timer.stopService() # stop it
    
    def svnAnnounce(self):
        """
        Timer callback: poll the repository and announce new commits.

        NOTE(review): the stored revision is advanced by only one per poll,
        so several quick commits get announced one timer tick at a time —
        presumably intentional rate-limiting; confirm.
        """
        rev = self.svn.lastRev()

        if rev == -1:
            print 'ERROR: Error connecting to SVN repo'
            return

        if rev != self.options.options['REVISION']: # check against stored revision
            # new commit, yay :)
            temp = int(self.options.options['REVISION'])
            temp += 1
            self.options.options['REVISION'] = str(temp)
            for target in self.options.announce_targets:    # tell everybody about it
                self.cmd_lastlog(target, target, [])

    def writeWatchDataToFile(self):
        """
        Write watch data to permanent storage (disk).

        One file per watched user under ``watchdata/``, one
        ``message<*!*>count`` record per line — the format that
        ``readWatchDataFromFile`` parses back.
        """
        if not checkDir('watchdata'):
            mkdir('watchdata')

        current = getcwd()
        chdir('watchdata')

        try:
            for user in self.userWatch:
                f = open(user + '.watch', 'w')
                try:
                    for message in self.userWatch[user]:
                        # BUG FIX: records were previously written without a
                        # newline, so a file with more than one entry could
                        # not be parsed by readWatchDataFromFile (which reads
                        # line by line and unpacks exactly two fields).
                        f.write('%s<*!*>%s\n'
                                % (message, self.userWatch[user][message]))
                finally:
                    f.close()
        finally:
            # Always restore the working directory, even on I/O errors.
            chdir(current)

    def readWatchDataFromFile(self):
        """
        Load watch data from permanent storage (disk).

        Reads one ``message<*!*>count`` record per line from each watched
        user's file under ``watchdata/``; users with no saved file are
        skipped.  (The original docstring said "Outputs" — copy-paste error;
        this method reads.)
        """
        if not checkDir('watchdata'):
            mkdir('watchdata')

        current = getcwd()
        chdir('watchdata')

        try:
            for user in self.options.watchUsers:
                # `in` instead of the deprecated dict.has_key().
                if user not in self.userWatch:
                    self.userWatch[user] = {}
                try:
                    f = open(user + '.watch', 'r')
                    try:
                        for line in f:
                            message, count = line.split('<*!*>')
                            self.userWatch[user][message.strip()] = int(count)
                    finally:
                        f.close()
                except IOError:
                    # No saved data for this user yet -- not an error.
                    continue
        finally:
            # Always restore the working directory, even on I/O errors.
            chdir(current)

    # callbacks for events

    def signedOn(self):
        """
        Identify with nickserv (when a registered nick is configured) and
        join every configured channel.
        """
        if self.options.registeredNick:
            self.msg('nickserv', 'identify ' + self.options.nickPassword)
        for channel in self.options.chanstojoin:
            self.join(channel)

    def joined(self, channel):
        """
        Record a newly-joined channel and attempt auto-login for every
        authorised user not already logged in.
        """
        self.options.channels.append(channel.lower())
        for name in self.options.authUsers:
            if name.lower() in self.loggedIn:
                continue
            print(' *** Attempting login for %s on %s' % (name, channel))
            self.cmd_login(name, channel, [])

    def userJoined(self, user, channel):
        """
        Greet a user who joins a channel we are on and, for authorised
        users, attempt an automatic login.
        """
        user = user.split('!', 1)[0]
        if user == self.nickname:
            return
        print('JOIN: %s on %s' % (user, channel))
        self.logger.log('JOIN: %s on %s' % (user, channel))

        if self.options.welcome:
            if user.lower() in self.options.authUsers:
                self.msg(channel, 'Welcome to %s, the all powerful %s, thank you for blessing us with your presence' % (channel, user))
            else:
                self.msg(channel, 'Welcome to %s, %s' % (channel, user))

        if user in self.options.authUsers:
            print(' *** Attempting login for %s on %s' % (user, channel))
            self.cmd_login(user, channel, [])

    def kickedFrom(self, channel, kicker, message):
        """Rejoin the channel (when configured) after being kicked."""
        if not self.options.rejoin:
            return
        self.join(channel)
        self.msg(channel, '%s: thanks for that (%s)' % (kicker, message))

    def action(self, user, channel, data):
        """Log a "/me" action performed by another user."""
        nick = user.split('!', 1)[0]
        self.logger.log('(%s): *%s %s ' % (channel, nick, data.strip()))
   
    def cmd_login(self, user, channel, params):
        """Grant bot access to an authorised user. Usage: LOGIN"""
        # XXX: verification relies on WHOIS reply 307, which is not
        #      RFC-standard and may only work on Shadowfire-like servers.
        if user.lower() not in self.options.authUsers:
            self.msg(user, 'ERROR: You Are Not Authorised!')
            return
        # Queue the user until the WHOIS reply confirms a registered nick.
        self.authQ.append(user)
        self.sendLine("WHOIS %s" % user)
    
    def cmd_logout(self, user, channel, params):
        """Revoke a user's bot access. Usage: LOGOUT"""
        name = user.lower()
        if name not in self.loggedIn:
            self.msg(user, 'ERROR: Not Logged In')
        else:
            self.loggedIn.remove(name)
    
    def irc_307(self, prefix, params):
        """
        Handle the WHOIS 307 reply ("registered nick"): complete a pending
        login for the queued user.
        """
        if len(params) != 3:
            return
        nick = params[1].lower()
        reply = params[2]

        if nick not in self.authQ:
            return
        self.authQ.remove(nick)
        if nick in self.loggedIn:
            return
        if reply == 'is a registered nick':
            self.loggedIn.append(nick)
            if self.options.announceLogins:
                self.msg(nick, 'You are now Logged In!')
   
    def privmsg(self, user, channel, msg):
        """
        This will get called when the bot receives a message.

        Strips the addressing prefix, checks authorisation, and dispatches
        to the matching ``cmd_<name>`` method by reflection.
        """
        user = user.split('!', 1)[0]
        message = msg.strip()
        checkUser = user.lower()

        self.logger.log('%s (%s): %s' % (user, channel, msg))

        if channel in self.options.channels:
        # if from channel,then only process if proceeded by nick: or !nick:
        # if nick, then respond to user from which msg originated
        # if !nick, then respond to channel
            # NOTE: from here on, `user` doubles as the reply target — it is
            # reassigned to the channel for the "!nick:"/channelChar forms.
            if message.startswith(self.nickname + ':'):
                message = message[len(self.nickname)+1:].strip()
            elif message.startswith('!' + self.nickname + ':'):
                message = message[len(self.nickname)+2:].strip()
                user = channel
            elif message.startswith(self.options.channelChar):
                message = message[len(self.options.channelChar):].strip()
                user = channel
            else:
                return
        elif self.nickname != channel:
            # Not a channel we track and not a private message to us.
            return

        params = message.split()
        command = params.pop(0)

        # empty command like "::", just ignore
        if not command:
            return

        # is the message from an authorised user? or open command?
        if checkUser in self.loggedIn or command.lower() in self.options.openCommands:
            print 'DEBUG: Remote Command %s from %s with parameters: %s' % (command, user, params)
            self.logger.log('Remote Command: %s from %s with parameters: %s' % (command, user, params))

            # special cases
            if command.lower() == 'login':
                self.cmd_login(checkUser, channel, params)
            elif command.lower() == 'logout':
                self.cmd_logout(checkUser, channel, params)
            elif command.lower() in _admin_commands and checkUser not in self.options.authors:
                self.msg(user, 'ERROR: Administrator only command: %s' % command)
            elif command.lower() in self.options.disabledCommands and checkUser not in self.options.authors:
                self.msg(user, 'ERROR: Disabled command: %s' % command)
            else:
                # despatch command
                try:
                    handler = getattr(self, 'cmd_' + command.lower())
                except AttributeError:
                    self.msg(user, 'Unrecognised command: %s' % command)
                else:
                    # call the handler here in case it throws an AttributeError
                    # (an AttributeError raised *inside* the handler must not
                    # be mistaken for a missing command).
                    handler(user, channel, params)

    # User commands

    def cmd_quit(self, user, channel, params):
        """Disconnect from IRC. Usage: QUIT <quit message>"""
        # Flag the factory so it knows this was a requested shutdown.
        self.factory.quit = True
        farewell = ' '.join(params)
        self.quit(farewell)

    def cmd_op(self, user, channel, params):
        """Gives Channel operator status to a user. Usage: OP [channel] <user | me>"""
        if len(params) > 1:
            # An explicit channel was supplied before the target.
            channel = params.pop(0)

        target = params[0]
        if target.lower() == 'me':
            target = user

        # mode(..., 1, 'o') sets +o on the target.
        self.mode(channel, 1, 'o', user=target)

    def cmd_deop(self, user, channel, params):
        """Removes Channel operator status from a user. Usage: DEOP [channel] <user | me>"""
        if len(params) > 1:
            # An explicit channel was supplied before the target.
            channel = params.pop(0)

        target = params[0]
        if target.lower() == 'me':
            target = user

        # mode(..., 0, 'o') sets -o on the target.
        self.mode(channel, 0, 'o', user=target)

    def cmd_topic(self, user, channel, params):
        """ Updates the current channel's topic. Usage: TOPIC [channel] [command] <topic>

             Commands:

               add  - add to current topic. Usage: TOPIC [channel] add <text>
               del  - remove from topic, based on position (starting at 0). Usage: TOPIC [channel] del <index>
               edit - replaces topic in specified position with new text. Usage: TOPIC [channel] edit <index> <text>
               set  - sets the topic. Usage: TOPIC [channel] set <text>
               get  - gets the current topic. Usage: TOPIC [channel] get
               undo - undo the last topic change. Usage: TOPIC [channel] undo
               redo - redo the last topic undo. Usage: TOPIC [channel] redo
               
               replace - replaces one word (using regexp) with a phrase throughout the whole topic. Usage: TOPIC [channel] replace <to replace - regexp> <phrase>
        """

        if len(params) > 1:
            if params[0] in self.options.channels:
                channel = params.pop(0)
            
            command = params.pop(0).lower()
    
            if not self.topics.has_key(channel): 
                self.topics[channel] = []
                
            current = self.topics[channel]

            if command == 'add':
                temp = current + [' '.join(params)]
#               current.append(' '.join(params))
                topic = ' | '.join(temp)
            elif command == 'del':
                index = int(params.pop(0))
                topic = current[:index]
                index += 1

                if index > 0:
                    topic.extend(current[index:])

                topic = ' | '.join(topic)
            elif command == 'edit':
                index = int(params.pop(0))
                current[index] = ' '.join(params)
                topic = ' | '.join(current)
            elif command == 'replace':
                #what = params.pop(0)
                what = re.compile(params.pop(0))
                with = ' '.join(params)
                topic = ' | '.join(current) 
                #topic = topic.replace(what, with)
                topic = what.sub(with, topic)
            elif command == 'get':
                self.msg(user, 'topic for %s is: %s' % (channel, ' | '.join(current)))
                return
コード例 #6
0
ファイル: service.py プロジェクト: twisted/txacme
class AcmeIssuingService(Service):
    """
    A service for keeping certificates up to date by using an ACME server.

    :type cert_store: `~txacme.interfaces.ICertificateStore`
    :param cert_store: The certificate store containing the certificates to
        manage.

    :type client: `txacme.client.Client`
    :param client: A client which is already set to be used for an
        environment.  For example, ``Client.from_url(reactor=reactor,
        url=LETSENCRYPT_STAGING_DIRECTORY, key=acme_key, alg=RS256)``.
        When the service is stopped, it will automatically call the stop
        method on the client.

    :param clock: ``IReactorTime`` provider; usually the reactor, when not
        testing.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: Challenge responders.  Usually only one responder is
        needed; if more than one responder for the same type is provided, only
        the first will be used.
    :param str email: An (optional) email address to use during registration.
    :param ~datetime.timedelta check_interval: How often to check for expiring
        certificates.
    :param ~datetime.timedelta reissue_interval: If a certificate is expiring
        in less time than this interval, it will be reissued.
    :param ~datetime.timedelta panic_interval: If a certificate is expiring in
        less time than this interval, and reissuing fails, the panic callback
        will be invoked.

    :type panic: Callable[[Failure, `str`], Deferred]
    :param panic: A callable invoked with the failure and server name when
        reissuing fails for a certificate expiring in the ``panic_interval``.
        For example, you could generate a monitoring alert.  The default
        callback logs a message at *CRITICAL* level.
    :param generate_key: A 0-arg callable used to generate a private key for a
        new cert.  Normally you would not pass this unless you have specialized
        key generation requirements.
    """
    # attrs-managed constructor: public configuration first, private
    # collaborators prefixed with "_".
    cert_store = attr.ib()
    _client = attr.ib()
    _clock = attr.ib()
    _responders = attr.ib()
    _email = attr.ib(default=None)
    check_interval = attr.ib(default=timedelta(days=1))
    reissue_interval = attr.ib(default=timedelta(days=30))
    panic_interval = attr.ib(default=timedelta(days=15))
    _panic = attr.ib(default=_default_panic)
    _generate_key = attr.ib(default=partial(generate_private_key, u'rsa'))

    # Deferreds handed out by when_certs_valid(), fired once the initial
    # check completes (see done_panicing below).
    _waiting = attr.ib(default=attr.Factory(list), init=False)
    # Map of canonical server_names -> (issuing Deferred, [waiting
    # Deferreds]), used to deduplicate concurrent issue_cert() calls.
    _issuing = attr.ib(default=attr.Factory(dict), init=False)
    # True once the initial certificate check has completed.
    ready = False
    # Service used to repeatedly call the certificate check and renewal.
    _timer_service = None
    # Deferred of the current certificates check.
    # Added to help the automated testing.
    _ongoing_check = None

    def _now(self):
        """
        Get the current time.
        """
        return clock_now(self._clock)

    def _check_certs(self):
        """
        Check all of the certs in the store, and reissue any that are expired
        or close to expiring.
        """
        log.info('Starting scheduled check for expired certificates.')

        def check(certs):
            # Partition stored entries into "panicing" (expired or about to
            # expire) and "expiring" (due for routine reissue).
            panicing = set()
            expiring = set()
            for server_names, objects in certs.items():
                # An entry with no stored objects is treated as overdue.
                if len(objects) == 0:
                    panicing.add(server_names)
                for o in filter(lambda o: isinstance(o, pem.Certificate),
                                objects):
                    cert = x509.load_pem_x509_certificate(
                        o.as_bytes(), default_backend())
                    until_expiry = cert.not_valid_after - self._now()
                    if until_expiry <= self.panic_interval:
                        panicing.add(server_names)
                    elif until_expiry <= self.reissue_interval:
                        expiring.add(server_names)

            log.info(
                'Found {panicing_count:d} overdue / expired and '
                '{expiring_count:d} expiring certificates.',
                panicing_count=len(panicing),
                expiring_count=len(expiring))

            # Failures for "panicing" entries go to the panic callback, while
            # routine reissue failures are only logged.  NOTE(review): the
            # panic branch calls _issue_cert directly (no deduplication)
            # while the expiring branch goes through issue_cert — confirm
            # intentional.
            d1 = (defer.gatherResults(
                [
                    self._issue_cert(server_names).addErrback(
                        self._panic, server_names) for server_names in panicing
                ],
                consumeErrors=True).addCallback(done_panicing))
            d2 = defer.gatherResults([
                self.issue_cert(server_names).addErrback(lambda f: log.failure(
                    u'Error issuing certificate for: {server_names!r}',
                    f,
                    server_names=server_names)) for server_names in expiring
            ],
                                     consumeErrors=True)
            return defer.gatherResults([d1, d2], consumeErrors=True)

        # done_panicing is referenced inside check above; it is resolved at
        # call time, after this function body has defined it.
        def done_panicing(ignored):
            # Initial check finished: release all when_certs_valid() waiters.
            self.ready = True
            for d in list(self._waiting):
                d.callback(None)
            self._waiting = []

        # Keep a handle on the in-flight check so tests can synchronise on it.
        self._ongoing_check = (self.cert_store.as_dict().addCallback(
            check).addErrback(lambda f: log.failure(
                u'Error in scheduled certificate check.', f)))
        return self._ongoing_check

    def issue_cert(self, server_names):
        """
        Issue a new cert for a particular list of FQDNs.

        If an existing cert exists, it will be replaced with the new cert.  If
        issuing is already in progress for the given name, a second issuing
        process will *not* be started.

        :param str server_names: The comma separated list of names to issue a
            cert for.

        :rtype: ``Deferred``
        :return: A deferred that fires when issuing is complete.
        """
        canonical_names = self._canonicalNames(server_names)

        def finish(result):
            # Fan the outcome (success value or Failure) out to every caller
            # that piled onto this issuance.
            _, waiting = self._issuing.pop(canonical_names)
            for d in waiting:
                d.callback(result)

        # d_issue is assigned below, in the conditional, since we may be
        # creating it or using the existing one.
        d = defer.Deferred(lambda _: d_issue.cancel())
        if canonical_names in self._issuing:
            # Issuance already in flight for these names; just wait on it.
            d_issue, waiting = self._issuing[canonical_names]
            waiting.append(d)
        else:
            d_issue = self._issue_cert(canonical_names)
            waiting = [d]
            self._issuing[canonical_names] = (d_issue, waiting)
            # Add the callback afterwards in case we're using a client
            # implementation that isn't actually async
            d_issue.addBoth(finish)
        return d

    @staticmethod
    def _canonicalNames(server_names):
        """
        Return the canonical representation for `server_names`.

        Whitespace around each comma-separated name is stripped so that
        equivalent requests share one _issuing entry.
        """
        names = [n.strip() for n in server_names.split(',')]
        return ','.join(names)

    def _issue_cert(self, server_names):
        """
        Issue a new cert for the list of server_names.

        `server_names` is already canonized.
        """
        names = [n.strip() for n in server_names.split(',')]

        log.info('Requesting a certificate for {server_names!r}.',
                 server_names=server_names)
        key = self._generate_key()
        # Accumulates the PEM objects (key first, then the cert chain) that
        # are stored together in got_cert below.
        objects = [
            pem.Key(
                key.private_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=serialization.PrivateFormat.TraditionalOpenSSL,
                    encryption_algorithm=serialization.NoEncryption()))
        ]

        @defer.inlineCallbacks
        def answer_to_order(orderr):
            """
            Answer the challenges associated with the order.
            """
            # Authorizations are answered sequentially, one yield at a time.
            for authorization in orderr.authorizations:
                yield answer_challenge(
                    authorization,
                    self._client,
                    self._responders,
                    clock=self._clock,
                )
            certificate = yield get_certificate(orderr,
                                                self._client,
                                                clock=self._clock)
            defer.returnValue(certificate)

        def got_cert(certr):
            """
            Called when we got a certificate.
            """
            # The certificate is returned as chain.
            objects.extend(pem.parse(certr.body))
            # NOTE(review): the Deferred returned by cert_store.store is not
            # returned here, so completion/failure of the store is not
            # propagated to the caller — confirm this is intended.
            self.cert_store.store(','.join(names), objects)

        return (self._client.submit_order(
            key, names).addCallback(answer_to_order).addCallback(got_cert))

    def when_certs_valid(self):
        """
        Get a notification once the startup check has completed.

        When the service starts, an initial check is made immediately; the
        deferred returned by this function will only fire once reissue has been
        attempted for any certificates within the panic interval.

        ..  note:: The reissue for any of these certificates may not have been
            successful; the panic callback will be invoked for any certificates
            in the panic interval that failed reissue.

        :rtype: ``Deferred``
        :return: A deferred that fires once the initial check has resolved.
        """
        if self.ready:
            return defer.succeed(None)
        d = defer.Deferred()
        self._waiting.append(d)
        return d

    def start(self):
        """
        Like startService, but will return a deferred once the service was
        started and operational.
        """
        Service.startService(self)

        def cb_start(result):
            """
            Called when the client is ready for operation.
            """
            # Only begin the periodic checks once the client has started.
            self._timer_service = TimerService(
                self.check_interval.total_seconds(), self._check_certs)
            self._timer_service.clock = self._clock
            self._timer_service.startService()

        return self._client.start(email=self._email).addCallback(cb_start)

    def startService(self):
        """
        Start operating the service.

        See `when_certs_valid` if you want to be notified when all the
        certificate from the storage were validated after startup.
        """
        # Startup failures are routed to the panic callback with a marker
        # name, since IService.startService cannot return a Deferred.
        self.start().addErrback(self._panic, 'FAIL-TO-START')

    def stopService(self):
        """
        Stop the service: cancel outstanding waiters, stop the ACME client,
        then the periodic check timer, and drop the timer reference.

        :return: Deferred firing once client and timer have stopped.
        """
        Service.stopService(self)
        self.ready = False
        # Cancel anyone still waiting on when_certs_valid().
        for d in list(self._waiting):
            d.cancel()
        self._waiting = []

        def stop_timer(ignored):
            # The timer only exists if start() got far enough to create it.
            if not self._timer_service:
                return
            return self._timer_service.stopService()

        def cleanup(ignored):
            self._timer_service = None

        # tap(...) preserves the original result/failure while running the
        # side-effecting cleanup steps.
        return (self._client.stop().addBoth(tap(stop_timer)).addBoth(
            tap(cleanup)))
コード例 #7
0
ファイル: metrics.py プロジェクト: dragorosson/otter
class MetricsService(Service, object):
    """
    Service that collects scaling-group metrics on a fixed interval.
    """

    def __init__(self, reactor, config, log, clock=None, collect=None):
        """
        Initialize the service by connecting to Cassandra and setting up
        authenticator

        :param reactor: Twisted reactor for connection purposes
        :param dict config: All the config necessary to run the service.
            Comes from config file
        :param IReactorTime clock: Optional reactor for timer purpose
        """
        self._client = connect_cass_servers(reactor, config['cassandra'])
        self.log = log
        self.reactor = reactor
        self._divergent_groups = {}
        self.divergent_timeout = get_in(
            ['metrics', 'divergent_timeout'], config, 3600)
        interval = get_in(['metrics', 'interval'], config, default=60)
        self._service = TimerService(
            interval,
            collect or self.collect,
            reactor,
            config,
            self.log,
            client=self._client,
            authenticator=generate_authenticator(reactor, config['identity']))
        self._service.clock = clock or reactor

    @defer.inlineCallbacks
    def collect(self, *a, **k):
        """
        Collect metrics once, and log every group that has remained diverged
        and unchanged longer than ``divergent_timeout``.  Exceptions never
        escape; they are logged instead.
        """
        try:
            metrics = yield collect_metrics(*a, **k)
            self._divergent_groups, stale = unchanged_divergent_groups(
                self.reactor, self._divergent_groups, self.divergent_timeout,
                metrics)
            for grp, secs in stale:
                self.log.err(
                    ValueError(""),  # Need to give an exception to log err
                    ("Group {group_id} of {tenant_id} remains diverged "
                     "and unchanged for {divergent_time}"),
                    tenant_id=grp.tenant_id, group_id=grp.group_id,
                    desired=grp.desired, actual=grp.actual,
                    pending=grp.pending,
                    divergent_time=str(timedelta(seconds=secs)))
        except Exception:
            self.log.err(None, "Error collecting metrics")

    def startService(self):
        """
        Start this service and its internal collection timer.
        """
        Service.startService(self)
        return self._service.startService()

    def stopService(self):
        """
        Stop the collection timer, then disconnect the Cassandra client.
        """
        Service.stopService(self)
        stopping = self._service.stopService()
        return stopping.addCallback(lambda _: self._client.disconnect())
コード例 #8
0
 def stopService(self):
     """
     Log the shutdown, then delegate to TimerService to stop the loop.
     """
     log.msg("Shutting down script runner service ...")
     d = TimerService.stopService(self)
     return d
コード例 #9
0
    def update_dummy_chunks_size(self,
                                 old_limit_mib=None, new_limit_mib=None):
        """
        Whenever the situation with the chunks could have been changed,
        update the dummy chunks: create new ones if there is a lack of them,
        or remove unneeded ones if there is an excess of them.

        @param old_limit_mib: The previous total amount of chunks to keep
                              in storage, assumed 0 if None.
        @type old_limit_mib: numbers.Integral, NoneType
        @param new_limit_mib: The new total amount of chunks to keep
                              in storage, taken from the settings if None.
        @type new_limit_mib: numbers.Integral, NoneType
        """
        # For clarity only
        super(ChunkStorageFS, self).update_dummy_chunks_size(old_limit_mib,
                                                             new_limit_mib)

        # TODO! TODO! TODO! we don't bother with dummy chunks for now
        # NOTE(review): the early return below deliberately disables all of
        # the resizing logic that follows; everything after it is dead code
        # kept for future re-enablement.
        logger.warn('update_dummy_chunks_size() disabled')
        return

        if new_limit_mib is None:
            assert old_limit_mib is None
            # No explicit limit given: fall back to the configured maximum
            # storage size from the host settings.
            new_limit_mib = \
                HostQueries.HostSettings.get(Queries.Settings
                                                    .MAX_STORAGE_SIZE_MIB)

        # This two variables will be used to specify the progress of the task.
        # timercb() below reads them via closure, so rebinding them in the
        # loops further down is visible to the periodic progress reports.
        num, of = 0, 0
        _operation = '<generic operation>'


        @exceptions_logged(logger)
        def timercb():
            """
            Callback function called on timer firing.
            """
            # Only report progress once the counters were initialized.
            if (num, of) != (0, 0):
                logger_status_chunks_op.info(
                    'The chunk reallocation takes too long, completed %i/%i',
                    num, of,
                    extra={'_type': 'chunks_allocation.progress',
                           'num': num,
                           'of': of})


        timer_service = TimerService(1.0, timercb)

        # If the task takes more than 3 seconds,
        # start notifying about the progress
        _callLater = reactor.callLater  # pylint:disable=E1101,C0103
        # Won't worry about deferToThread here, cause it is very fast.
        long_task_timer = _callLater(3.0, timer_service.startService)

        logger.debug('Resizing dummy chunk set from %s to %s',
                     old_limit_mib, new_limit_mib)

        with self.__chunk_op_lock:
            try:
                # Check for dummy chunks before the check for present files,
                # as the check for dummy chunks also may remove some
                # of the files.

                # What dummy chunks are available?
                # list, so it can be used twice.
                # TODO: do we need to use it twice?
                dummy_chunk_uuids = list(self.__get_dummy_chunk_uuids_on_fs())
                how_many_dummy_chunks = len(dummy_chunk_uuids)

                # What chunk files are present on the FS,...
                present_chunk_filenames_iter = \
                    self.__get_chunk_filenames_on_fs(self.__chunk_dir)
                # ... and what are the chunk UUIDs?
                # present_chunk_uuids = \
                #     self.__class__.convert_chunk_filenames_to_uuids(
                #         present_chunk_filenames)

                # How many bytes/MiB do we need to have preallocated?
                reserved_mib = long(new_limit_mib)
                reserved_size = reserved_mib * 0x100000

                # How many bytes/MiB is preallocated already?
                present_chunk_size = \
                    sum(os.stat(f).st_size
                            for f in present_chunk_filenames_iter)
                del present_chunk_filenames_iter  # help GC
                present_chunks_in_mib = \
                    round_up_to_multiply(present_chunk_size,
                                         0x100000) // 0x100000

                if reserved_mib > present_chunks_in_mib:
                    # Add new chunks
                    how_many_new_chunks = reserved_mib - present_chunks_in_mib
                    of = how_many_new_chunks
                    _operation = 'allocation'

                    for u in self.__create_some_dummy_chunks(
                                 how_many_new_chunks):
                        num += 1

                elif reserved_mib < present_chunks_in_mib:
                    # Try to remove some dummy chunks...
                    how_many_chunks_try_to_remove = \
                        present_chunks_in_mib - reserved_mib

                    # But we cannot remove more than len(dummy_chunk_uuids)!
                    if how_many_dummy_chunks < how_many_chunks_try_to_remove:
                        logger.debug('Trying to remove %i chunks, '
                                         'but only %i dummy chunks available!',
                                     how_many_chunks_try_to_remove,
                                     how_many_dummy_chunks)

                    how_many_chunks_to_remove = \
                        min(how_many_chunks_try_to_remove,
                            how_many_dummy_chunks)
                    of = how_many_chunks_to_remove
                    _operation = 'removing'

                    chunk_uuids_to_delete = take(how_many_chunks_to_remove,
                                                 dummy_chunk_uuids)

                    for u in self.delete_some_dummy_chunks(
                                 how_many_chunks_to_remove,
                                 chunk_uuids_to_delete):
                        num += 1

            except Exception as e:
                logger_status_chunks_op_error.error(
                    'The chunk %s failed: %r',
                    _operation, e,
                    extra={'_type': 'chunks_allocation.error',
                           '_exc': e,
                           '_tb': traceback.format_exc()})

            finally:
                # We've done with the chunks allocation.
                # Now stop the timer, and manually report that 100%
                # of the work is done.
                if (not long_task_timer.called and
                    not long_task_timer.cancelled):
                    long_task_timer.cancel()

                if timer_service.running:
                    timer_service.stopService()

                timercb()
コード例 #10
0
ファイル: service.py プロジェクト: habnabit/txacme
class AcmeIssuingService(Service):
    """
    A service for keeping certificates up to date by using an ACME server.

    :param .ICertificateStore cert_store: The certificate store containing the
        certificates to manage.
    :param ~txacme.client.Client client: The ACME client to use.  Typically
        constructed with `Client.from_url <txacme.client.Client.from_url>`.
    :param clock: ``IReactorTime`` provider; usually the reactor, when not
        testing.

    :type responders: List[`.IResponder`]
    :param responders: Challenge responders.  Usually only one responder is
        needed; if more than one responder for the same type is provided, only
        the first will be used.
    :param ~datetime.timedelta check_interval: How often to check for expiring
        certificates.
    :param ~datetime.timedelta reissue_interval: If a certificate is expiring
        in less time than this interval, it will be reissued.
    :param ~datetime.timedelta panic_interval: If a certificate is expiring in
        less time than this interval, and reissuing fails, the panic callback
        will be invoked.

    :type panic: Callable[[Failure, `str`], Deferred]
    :param panic: A callable invoked with the failure and server name when
        reissuing fails for a certificate expiring in the ``panic_interval``.
        For example, you could generate a monitoring alert.  The default
        callback logs a message at *CRITICAL* level.
    :param generate_key: A 0-arg callable used to generate a private key for a
        new cert.  Normally you would not pass this unless you have specialized
        key generation requirements.
    """
    cert_store = attr.ib()
    _client = attr.ib()
    _clock = attr.ib()
    _responders = attr.ib()
    check_interval = attr.ib(default=timedelta(days=1))
    reissue_interval = attr.ib(default=timedelta(days=30))
    panic_interval = attr.ib(default=timedelta(days=15))
    _panic = attr.ib(default=_default_panic)
    _generate_key = attr.ib(default=partial(generate_private_key, u'rsa'))
    _waiting = attr.ib(default=attr.Factory(list))
    # True once the initial check (including any panic-interval reissues) has
    # completed; gates `when_certs_valid`.
    ready = False

    def _now(self):
        """
        Get the current time.
        """
        return clock_now(self._clock)

    def _check_certs(self):
        """
        Check all of the certs in the store, and reissue any that are expired
        or close to expiring.
        """
        def check(certs):
            panicing = set()
            expiring = set()
            for server_name, objects in certs.items():
                if len(objects) == 0:
                    # No stored objects at all: treat as critically missing.
                    panicing.add(server_name)
                for o in filter(lambda o: isinstance(o, Certificate), objects):
                    cert = x509.load_pem_x509_certificate(
                        o.as_bytes(), default_backend())
                    until_expiry = cert.not_valid_after - self._now()
                    if until_expiry <= self.panic_interval:
                        panicing.add(server_name)
                    elif until_expiry <= self.reissue_interval:
                        expiring.add(server_name)
            d1 = (
                gatherResults(
                    [self._issue_cert(server_name)
                     .addErrback(self._panic, server_name)
                     for server_name in panicing],
                    consumeErrors=True)
                .addCallback(done_panicing))
            d2 = gatherResults(
                [self._issue_cert(server_name)
                 .addErrback(
                     # Bind server_name as a default argument: the errback
                     # fires after this comprehension has completed, so a
                     # plain closure would always report the *last* name in
                     # ``expiring`` instead of the one that actually failed.
                     lambda f, server_name=server_name: log.failure(
                         u'Error issuing certificate for: {server_name!r}',
                         f, server_name=server_name))
                 for server_name in expiring],
                consumeErrors=True)
            return gatherResults([d1, d2], consumeErrors=True)

        def done_panicing(ignored):
            # All panic-interval reissues were attempted; release everyone
            # waiting in when_certs_valid().
            self.ready = True
            for d in list(self._waiting):
                d.callback(None)
            self._waiting = []

        return (
            self._register()
            .addCallback(lambda _: self.cert_store.as_dict())
            .addCallback(check))

    def _issue_cert(self, server_name):
        """
        Issue a new cert for a particular name.

        Generates a fresh private key, answers the ACME challenge, fetches
        the certificate and its chain, and stores everything in the cert
        store under ``server_name``.
        """
        key = self._generate_key()
        objects = [
            Key(key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()))]

        def answer_and_poll(authzr):
            def got_challenge(r):
                responder, response = r

                def stop_responding(result):
                    return responder.stop_responding(response)

                return (
                    poll_until_valid(authzr, self._clock, self._client)
                    .addBoth(tap(stop_responding)))
            return (
                answer_challenge(authzr, self._client, self._responders)
                .addCallback(got_challenge))

        def got_cert(certr):
            # Re-encode the DER certificate as PEM for storage.
            objects.append(
                Certificate(
                    x509.load_der_x509_certificate(
                        certr.body, default_backend())
                    .public_bytes(serialization.Encoding.PEM)))
            return certr

        def got_chain(chain):
            for certr in chain:
                got_cert(certr)
            return objects

        return (
            self._client.request_challenges(fqdn_identifier(server_name))
            .addCallback(answer_and_poll)
            .addCallback(lambda ign: self._client.request_issuance(
                CertificateRequest(
                    csr=csr_for_names([server_name], key))))
            .addCallback(got_cert)
            .addCallback(self._client.fetch_chain)
            .addCallback(got_chain)
            .addCallback(partial(self.cert_store.store, server_name)))

    def _register(self):
        """
        Register if needed.

        The ``_registered`` flag is reset in startService/stopService, so
        registration happens at most once per service run.
        """
        def _registered(ign):
            self._registered = True
        if self._registered:
            return succeed(None)
        else:
            return (
                self._client.register()
                .addCallback(self._client.agree_to_tos)
                .addCallback(_registered))

    def when_certs_valid(self):
        """
        Get a notification once the startup check has completed.

        When the service starts, an initial check is made immediately; the
        deferred returned by this function will only fire once reissue has been
        attempted for any certificates within the panic interval.

        ..  note:: The reissue for any of these certificates may not have been
            successful; the panic callback will be invoked for any certificates
            in the panic interval that failed reissue.

        :rtype: ``Deferred``
        :return: A deferred that fires once the initial check has resolved.
        """
        if self.ready:
            return succeed(None)
        d = Deferred()
        self._waiting.append(d)
        return d

    def startService(self):
        """
        Start the periodic certificate check (runs once immediately).
        """
        Service.startService(self)
        self._registered = False
        self._timer_service = TimerService(
            self.check_interval.total_seconds(), self._check_certs)
        self._timer_service.clock = self._clock
        self._timer_service.startService()

    def stopService(self):
        """
        Stop the periodic check and cancel any outstanding
        `when_certs_valid` waiters.

        :rtype: ``Deferred``
        """
        Service.stopService(self)
        self.ready = False
        self._registered = False
        for d in list(self._waiting):
            d.cancel()
        self._waiting = []
        return self._timer_service.stopService()
コード例 #11
0
 def stopService(self):
     """
     Stop the timer only if it was actually started.

     Works around a TimerService quirk: calling stopService before
     startService fails because of its condition
     "not hasattr(self, '_loop')".
     """
     if not self.running:
         return None
     return TimerService.stopService(self)
コード例 #12
0
ファイル: Server.py プロジェクト: BAN1993/BANServer-twisted
class Server(ServerInterface.ServerBase, ServerInterface.ClientManager):
    """
    SRS server: bridges connector clients, the player manager, and the
    center/game-server connections.
    """

    m_isRunning = False

    m_config = None
    m_timer = None

    m_port = 0
    m_connectorServer = None
    m_playerManager = None
    #m_gameSvrClient = None

    m_centerCliHost = ""
    m_centerCliPort = 0
    m_centerClients = None

    def init(self, subtype, conf):
        """Create the config client and the internal managers."""
        config_host = str(conf.get("configsvr", "host"))
        config_port = int(conf.get("configsvr", "port"))
        self.m_config = ConfigClient.ConfigClent(self, subtype,
                                                 Base.SVR_TYPE_SRS,
                                                 config_host, config_port)

        self.m_connectorServer = ConnectorServer.ConnectorServer(self)
        self.m_centerClients = ClientManager.ClientManager(self)
        self.m_playerManager = PlayerManager.PlayerManager(self)

    def timer(self):
        """Per-second housekeeping, delegated to the player manager."""
        self.m_playerManager.timer()

    def run(self):
        """Connect to the config server, start the timer and the reactor."""
        self.m_config.connect(self.configCallBack)

        self.m_timer = TimerService(1, self.timer)
        self.m_timer.startService()

        # Must be the very last step: reactor.run() blocks.
        from twisted.internet import reactor
        self.m_isRunning = True
        logging.info("reactor run")
        reactor.run()

    def configCallBack(self, flag):
        """Config connection result: start listening, or shut down."""
        if not flag:
            logging.error("connect config error and return")
            self.stop()
            return

        self.m_connectorServer.begin(self.m_config.getPort())

        sql = "SELECT CONCAT(cast(A.id AS CHAR),'$$$',cast(B.ip AS CHAR),'$$$',cast(A. PORT AS CHAR)) FROM config_svr A,config_routing_table B WHERE A.svrtype=2 AND A.svrid=B.id AND A.hide=0"
        self.m_config.GetConfigBySql(sql, self.getGameServerConfigCB)

    def getGameServerConfigCB(self, flag, retstr):
        """Parse the '$$$'-delimited game-server rows and connect to each."""
        if flag and len(retstr) > 0:
            for idx in range(len(retstr)):
                logging.info("index=%d,str=%s" % (idx, retstr[idx]))
                fields = str(retstr[idx]).split("$$$")
                game_appid = int(fields[0])
                self.m_centerCliHost = str(fields[1])
                self.m_centerCliPort = int(fields[2])
                self.m_centerClients.addConnect(game_appid,
                                                self.m_centerCliHost,
                                                self.m_centerCliPort)
        else:
            logging.error("get gameserver config error")
            self.stop()

    def stop(self):
        """Stop the timer (if created) and the reactor (if running)."""
        if self.m_timer:
            self.m_timer.stopService()
        if not self.m_isRunning:
            logging.info("try stop svr,but is not running")
            return
        from twisted.internet import reactor
        if not reactor._stopped:
            logging.info("stop reactor")
            reactor.stop()
        else:
            logging.info("try stop ractor,but is stopped")

    # ---- Client callbacks: delegate to the player manager ----
    def newClient(self, conn):
        self.m_playerManager.newClient(conn)

    def recvFromClient(self, conn, packlen, appid, numid, xyid, data):
        self.m_playerManager.recvFromClient(conn, packlen, appid, numid,
                                            xyid, data)

    def loseClient(self, conn):
        self.m_playerManager.loseClient(conn)

    # ---- GameSvr callbacks ----
    def recvData(self, packlen, appid, srcappid, numid, xyid, data):
        self.m_playerManager.recvFromServer(packlen, appid, srcappid, numid,
                                            xyid, data)

    def sendToServer(self, data):
        self.m_centerClients.sendData(data)
コード例 #13
0
ファイル: Server.py プロジェクト: BAN1993/BANServer-twisted
class Server(ServerInterface.ServerBase):
    """
    Center server: handles login requests arriving through the connector
    server and validates them against the ``players`` DB table.
    """

    m_isRunning = False

    m_config = None
    m_timer = None

    m_port = 0
    m_connectorServer = None

    def init(self, subtype, conf):
        """
        Create the config client and the connector server.

        :param subtype: server subtype identifier passed to the config client
        :param conf: ConfigParser-like object with a ``configsvr`` section
        """
        cfgip = str(conf.get("configsvr", "host"))
        cfgport = int(conf.get("configsvr", "port"))
        self.m_config = ConfigClient.ConfigClent(self, subtype,
                                                 Base.SVR_TYPE_CENTER, cfgip,
                                                 cfgport)

        self.m_connectorServer = ConnectorServer.ConnectorServer(self)

        #gDBManager.init(conf)

    def run(self):
        """
        Connect to the config server, start the 1-second timer, then run
        the reactor (blocks until the reactor stops).
        """
        self.m_config.connect(self.configCallBack)

        self.m_timer = TimerService(1, self.timer)
        self.m_timer.startService()

        # Must be the very last step: reactor.run() blocks.
        from twisted.internet import reactor
        self.m_isRunning = True
        logging.info("reactor run")
        reactor.run()

    def timer(self):
        """Periodic (1s) housekeeping hook; nothing to do yet."""
        pass

    def configCallBack(self, flag):
        """
        Called when the config-server connection attempt finishes: parse
        DB settings and start listening on success, shut down on failure.
        """
        if flag:
            configstr = self.m_config.getConfig()
            configstr = "{" + configstr + "}"
            # SECURITY NOTE(review): eval() executes arbitrary expressions
            # from the config payload.  The config server is presumably
            # trusted here, but ast.literal_eval would be the safer choice.
            tab = eval(configstr)
            if tab.has_key('dbip') and tab.has_key('dbport') and tab.has_key(
                    'dbuser') and tab.has_key('dbpwd') and tab.has_key(
                        'dbname'):
                gDBManager.init(tab['dbip'], tab['dbport'], tab['dbuser'],
                                tab['dbpwd'], tab['dbname'])
            else:
                logging.error("db config error")
                self.stop()
                return
            self.m_connectorServer.begin(self.m_config.getPort())

        else:
            logging.error("connect config error and return")
            self.stop()

    def stop(self):
        """
        Stop the timer (if any) and the reactor (if running).
        """
        # Guard against stop() being reached before run() created the timer
        # (m_timer defaults to None); this also matches the sibling SRS
        # Server class, which performs the same check.
        if self.m_timer:
            self.m_timer.stopService()
        if self.m_isRunning:
            from twisted.internet import reactor
            if not reactor._stopped:
                logging.info("stop reactor")
                reactor.stop()
            else:
                logging.info("try stop ractor,but is stopped")
        else:
            logging.info("try stop svr,but is not running")

    def newClient(self, conn):
        """A connector client (SRS) connected."""
        logging.info("conn ip=%s,appid=%d" %
                     (conn.transport.hostname, conn.m_numid))

    def recvFromClient(self, conn, packlen, appid, numid, xyid, data):
        """Dispatch an incoming packet to the protocol handler."""
        self.selectProtocol(conn, packlen, appid, numid, xyid, data)

    def loseClient(self, conn):
        """A connector client disconnected."""
        logging.info("conn ip=%s" % (conn.transport.hostname))

    def selectProtocol(self, conn, packlen, appid, numid, xyid, data):
        """
        Handle a single protocol message; currently only the login request
        (``XYID_SRS_REQ_LOGIN``) is implemented.
        """
        logging.debug("packlen=%d,appid=%d,srcappid=%d,numid=%d,xyid=%d" %
                      (packlen, appid, conn.m_numid, numid, xyid))
        if xyid == ProtocolSRS.XYID_SRS_REQ_LOGIN:
            req = ProtocolSRS.ReqLogin()
            ret = req.make(data)
            # NOTE(review): this logs the plaintext password; consider
            # redacting it.  Passwords are also compared in plaintext below;
            # the players table should store salted hashes instead.
            logging.info("ReqLogin:connid=%d,userid=%s,pwd=%s" %
                         (req.connid, req.userid, req.password))

            resp = ProtocolSRS.RespLogin()
            resp.connid = req.connid

            # SECURITY: escape single quotes so a client-supplied userid
            # cannot break out of the SQL string literal (basic injection
            # guard).  A parameterized query would be preferable, but
            # gDBManager.select() only accepts a raw SQL string here.
            safe_userid = req.userid.replace("'", "''")
            sql = "select numid,passwd from players where userid='%s'" % \
                  safe_userid
            ret, row, rslt = gDBManager.select(sql)
            if not ret:
                resp.flag = resp.FLAG.DBERR
                logging.error("select ret err,sql=%s" % sql)
            elif row <= 0:
                resp.flag = resp.FLAG.NOUSER
                logging.info("userid=%s select no data" % req.userid)
            else:
                if str(rslt[0][1]) == req.password:
                    resp.flag = resp.FLAG.SUCCESS
                    resp.numid = int(rslt[0][0])
                    logging.info("userid=%s login success,numid=%d" %
                                 (req.userid, resp.numid))
                else:
                    resp.flag = resp.FLAG.PWDERR
                    logging.info("userid=%s pwd err" % req.userid)

            buf = resp.pack()
            conn.sendData(buf)

        else:
            logging.warning("unknown xy,xyid=%d" % xyid)
コード例 #14
0
class TimerServiceTests(TestCase):
    """
    Tests for L{twisted.application.internet.TimerService}.

    @type timer: L{TimerService}
    @ivar timer: service to test

    @type clock: L{task.Clock}
    @ivar clock: source of time

    @type deferred: L{Deferred}
    @ivar deferred: deferred returned by L{TimerServiceTests.call}.
    """
    def setUp(self):
        """
        Set up a timer service to test.
        """
        # Replace the timer's clock with a deterministic task.Clock so the
        # tests control time manually instead of using the real reactor.
        self.timer = TimerService(2, self.call)
        self.clock = self.timer.clock = task.Clock()
        self.deferred = Deferred()

    def call(self):
        """
        Function called by L{TimerService} being tested.

        @returns: C{self.deferred}
        @rtype: L{Deferred}
        """
        return self.deferred

    def test_startService(self):
        """
        When L{TimerService.startService} is called, it marks itself
        as running, creates a L{task.LoopingCall} and starts it.
        """
        self.timer.startService()
        self.assertTrue(self.timer.running, "Service is started")
        self.assertIsInstance(self.timer._loop, task.LoopingCall)
        self.assertIdentical(self.clock, self.timer._loop.clock)
        self.assertTrue(self.timer._loop.running, "LoopingCall is started")

    def test_startServiceRunsCallImmediately(self):
        """
        When L{TimerService.startService} is called, it calls the function
        immediately.
        """
        result = []
        # TimerService.call is a (callable, args, kwargs) triple.
        self.timer.call = (result.append, (None, ), {})
        self.timer.startService()
        self.assertEqual([None], result)

    def test_startServiceUsesGlobalReactor(self):
        """
        L{TimerService.startService} uses L{internet._maybeGlobalReactor} to
        choose the reactor to pass to L{task.LoopingCall}
        uses the global reactor.
        """
        otherClock = task.Clock()

        def getOtherClock(maybeReactor):
            return otherClock

        self.patch(internet, "_maybeGlobalReactor", getOtherClock)
        self.timer.startService()
        self.assertIdentical(otherClock, self.timer._loop.clock)

    def test_stopServiceWaits(self):
        """
        When L{TimerService.stopService} is called while a call is in progress,
        the L{Deferred} returned doesn't fire until after the call finishes.
        """
        self.timer.startService()
        d = self.timer.stopService()
        # The in-progress call (self.deferred) hasn't fired yet, so stop
        # must still be pending and the service still marked running.
        self.assertNoResult(d)
        self.assertEqual(True, self.timer.running)
        self.deferred.callback(object())
        self.assertIdentical(self.successResultOf(d), None)

    def test_stopServiceImmediately(self):
        """
        When L{TimerService.stopService} is called while a call isn't in
        progress, the L{Deferred} returned has already been fired.
        """
        self.timer.startService()
        self.deferred.callback(object())
        d = self.timer.stopService()
        self.assertIdentical(self.successResultOf(d), None)

    def test_failedCallLogsError(self):
        """
        When function passed to L{TimerService} returns a deferred that
        errbacks, the exception is logged, and L{TimerService.stopService}
        doesn't raise an error.
        """
        self.timer.startService()
        self.deferred.errback(Failure(ZeroDivisionError()))
        # The failure must have been logged (and is flushed here so the
        # test framework doesn't report it as an unexpected error).
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(1, len(errors))
        d = self.timer.stopService()
        self.assertIdentical(self.successResultOf(d), None)

    def test_pickleTimerServiceNotPickleLoop(self):
        """
        When pickling L{internet.TimerService}, it won't pickle
        L{internet.TimerService._loop}.
        """
        # We need a pickleable callable to test pickling TimerService. So we
        # can't use self.timer
        timer = TimerService(1, fakeTargetFunction)
        timer.startService()
        dumpedTimer = pickle.dumps(timer)
        timer.stopService()
        loadedTimer = pickle.loads(dumpedTimer)
        nothing = object()
        value = getattr(loadedTimer, "_loop", nothing)
        self.assertIdentical(nothing, value)

    def test_pickleTimerServiceNotPickleLoopFinished(self):
        """
        When pickling L{internet.TimerService}, it won't pickle
        L{internet.TimerService._loopFinished}.
        """
        # We need a pickleable callable to test pickling TimerService. So we
        # can't use self.timer
        timer = TimerService(1, fakeTargetFunction)
        timer.startService()
        dumpedTimer = pickle.dumps(timer)
        timer.stopService()
        loadedTimer = pickle.loads(dumpedTimer)
        nothing = object()
        value = getattr(loadedTimer, "_loopFinished", nothing)
        self.assertIdentical(nothing, value)
コード例 #15
0
    def migrate_chunks(self, old_path, new_path):
        """
        Migrate the chunks from their previous path to the new one.

        @note: Only non-dummy chunks are migrated;
               dummy chunks are removed from the old place and
               not regenerated at the new place,
               please call update_dummy_chunks_size() manually for that.
        """
        assert old_path != new_path, (old_path, new_path)

        # This two variables will be used to specify the progress of the task.
        # timercb() below reads them via closure, so updates from the
        # migration loop are visible to the periodic progress reports.
        num, of = 0, 0


        @exceptions_logged(logger)
        def timercb():
            """
            Callback function called on timer firing.
            """
            # Only report once the counters have been set up.
            if (num, of) != (0, 0):
                logger_status_chunks_op.info(
                    'The chunk migration takes too long, completed %i/%i',
                    num, of,
                    extra={'_type': 'chunks_migration.progress',
                           'num': num,
                           'of': of})


        timer_service = TimerService(1.0, timercb)

        # If the task takes more than 3 seconds, start notifying
        # about the progress
        _callLater = reactor.callLater  # pylint:disable=E1101,C0103
        # Won't worry about deferToThread here, cause it is very fast.
        long_task_timer = _callLater(3.0, timer_service.startService)

        with self.__chunk_op_lock:
            try:
                # What chunk files are present on the FS,
                # and what are the chunk UUIDs?
                present_chunk_uuids_iter = self.__get_chunk_uuids_on_fs()

                with db.RDB() as rdbw:
                    dummy_chunks_in_db = \
                        frozenset(HostQueries.HostChunks
                                             .get_all_dummy_chunk_uuids(
                                                  rdbw=rdbw))

                # First, remove all the dummy chunks
                removed_dummy_chunks = []
                for dummy_chunk_uuid in dummy_chunks_in_db:
                    try:
                        # Sanity check: the dummy chunk's path under the old
                        # directory must match the default path computation.
                        assert self.__get_chunk_file_path(dummy_chunk_uuid,
                                                          is_dummy=True,
                                                          dir_path=old_path) \
                               == self.__get_chunk_file_path(dummy_chunk_uuid,
                                                             is_dummy=True)

                        _path = self.__get_chunk_file_path(dummy_chunk_uuid,
                                                           is_dummy=True)
                        if os.path.exists(_path):
                            os.unlink(_path)
                        # If we removed the file successfully, let's append it
                        # to the list of the chunks which are to be removed
                        # from the DB.
                        removed_dummy_chunks.append(dummy_chunk_uuid)
                    except Exception as e:
                        logger.error('Cannot remove dummy chunk %s: %s',
                                     dummy_chunk_uuid, e)
                HostQueries.HostChunks \
                           .delete_dummy_chunks(removed_dummy_chunks)

                # This dictionary maps the chunk UUID
                # to a tuple of the old filename and the new filename.
                #
                # Btw, no need to convert present_chunk_uuids_iter to set
                # and do the set difference, as it is the same complexity
                # as for ... if not in.
                uuid_to_filenames = \
                    {u: (self.__get_chunk_file_path(u,
                                                    is_dummy=False,
                                                    dir_path=old_path),
                         self.__get_chunk_file_path(u,
                                                    is_dummy=False,
                                                    dir_path=new_path))
                         for u in present_chunk_uuids_iter
                         if u not in dummy_chunks_in_db}

                # Now, move the files to the new directory.
                of = len(uuid_to_filenames)
                for u, (old_filename, new_filename) \
                        in uuid_to_filenames.iteritems():

                    logger.debug('Moving chunk %s from %s to %s',
                                 u, old_filename, new_filename)

                    try:
                        # Copy-then-unlink rather than a rename, presumably
                        # so migration works across filesystems — TODO confirm.
                        with open(old_filename, 'rb') as rfh:
                            with open_wb(new_filename) as wfh:
                                wfh.write(rfh.read())
                    except Exception:
                        logger.error('Cannot move chunk %s from %s to %s',
                                     u, old_filename, new_filename)
                    else:
                        try:
                            os.unlink(old_filename)
                        except Exception:
                            logger.error('Cannot remove chunk file %s',
                                         old_filename)

                    num += 1

            except Exception as e:
                logger_status_chunks_op_error.error(
                    'The chunks migration failed: %r',
                    e,
                    extra={'_type': 'chunks_migration.error',
                           '_exc': e,
                           '_tb': traceback.format_exc()})

            finally:
                # Cancel the delayed progress starter if it never fired, and
                # stop the periodic reporting service if it is running.
                if (not long_task_timer.called and
                    not long_task_timer.cancelled):
                    long_task_timer.cancel()

                if timer_service.running:
                    timer_service.stopService()
コード例 #16
0
ファイル: traffic_meter.py プロジェクト: shvar/redfs
class TrafficMeter(object):
    """
    Traffic meter, measuring all the traffic to some direction.

    Traffic "ticks" are registered manually via C{add_tick()}; an internal
    C{TimerService} periodically logs the throughput, computed as a simple
    moving average (SMA) over the C{THROUGHPUT_SMA_PERIOD} window.

    @note: At the moment, restart of the service (after it has been started)
           is not supported.

    @ivar __start_time: the time of the first registered traffic "tick".
    @type __start_time: datetime

    @ivar __ticks: the sequence of TrafficTick objects
                   (always sorted by the time, increasing!)
                   registering each successfully passed bunch of bytes.
    @invariant: consists_of(self.__ticks, TrafficTick)
    """
    __slots__ = ('__direction_incoming', '__total_bytes', '__ticks',
                 '__ticks_lock', '__start_time', '__timer_service',
                 '__started')


    def __init__(self, is_direction_incoming):
        """
        @param is_direction_incoming: whether the measured direction
            is incoming (otherwise outgoing).
        @type is_direction_incoming: bool
        """
        self.__direction_incoming = is_direction_incoming
        # Running total of all bytes ever registered; updated in add_tick().
        self.__total_bytes = 0
        self.__ticks = deque()
        self.__ticks_lock = Lock()
        # Set by add_tick() upon the very first tick.
        self.__start_time = None
        # The timer is only created here; it is started in start().
        self.__timer_service = TimerService(
                                   TRAFFIC_NOTIFICATION_PERIOD.total_seconds(),
                                   self.__on_timer)
        self.__started = False


    @property
    def started(self):
        # Whether start() has been called (and stop() has not).
        return self.__started


    def start(self):
        """
        Start monitoring and reporting.
        """
        with self.__ticks_lock:
            assert not self.__started
            self.__timer_service.startService()
            self.__started = True
            logger.debug('%s traffic meter started', self._name)


    @property
    def _name(self):
        # Human-readable direction name, used in log messages.
        return 'Inbound' if self.__direction_incoming else 'Outbound'


    def stop(self):
        """
        Stop monitoring and reporting.
        """
        with self.__ticks_lock:
            assert self.__started
            self.__started = False
            self.__timer_service.stopService()
            logger.debug('%s traffic meter stopped', self._name)


    @exceptions_logged(logger)
    def __on_timer(self):
        """
        Periodic timer callback: log the current SMA throughput (in kBps).
        """
        _now = datetime.utcnow()
        _oldest_time_to_consider = _now - THROUGHPUT_SMA_PERIOD

        # Even if the timer is started, we don't report throughput
        # until the first ticks are registered.
        # NOTE(review): this emptiness test happens outside __ticks_lock;
        # presumably benign since the deque is re-checked under the lock
        # below - confirm.
        if self.__ticks:
            with self.__ticks_lock:
                # Remove (leftmost) ticks which are too old to consider.
                while (self.__ticks and
                       self.__ticks[0].time < _oldest_time_to_consider):
                    self.__ticks.popleft()

                # All the remaining ticks now serve for the throughput
                # calculation, and comprise the current SMA window.
                sma_kilobytes = sum(i.bytes for i in self.__ticks) / 1000.0

                # (btw, done with the lock, remaining calculation
                # can be unlocked)

            # How much time passed since the traffic meter has been started?
            # (__start_time is non-None here: at least one tick exists.)
            uptime_td = TimeDeltaEx.from_timedelta(_now - self.__start_time)

            # If the meter has just been started,
            # we calculate the throughput dividing to the actual uptime
            # rather than the period window.
            sma_td = min(uptime_td, THROUGHPUT_SMA_PERIOD)
            sma_seconds = sma_td.in_seconds()

            # NOTE(review): if this timer fires in the same instant as the
            # very first tick, sma_seconds may be 0 and this division would
            # raise ZeroDivisionError (swallowed by @exceptions_logged) -
            # confirm this is acceptable.
            kBps = sma_kilobytes / sma_seconds

            # Measurement unit is "kilobytes per second"
            # (not "kibi", not "bits"!)
            logger_traffic_meter.info('%s: %.02f kBps',
                                      'In' if self.__direction_incoming
                                           else 'Out',
                                      kBps,
                                      extra={'is_incoming':
                                                 self.__direction_incoming,
                                             'kBps': kBps})


    @contract_epydoc
    def add_tick(self, bytes):
        """
        Call this function manually to register a new data tick
        passing C{bytes} bytes.

        @param bytes: how many bytes passed in the tick.
        @type bytes: numbers.Number
        """
        assert self.__started
        assert isinstance(bytes, numbers.Number), repr(bytes)

        _now = datetime.utcnow()
        # logger.debug("%s traffic %i bytes", self._name, bytes)
        with self.__ticks_lock:
            # Appending at "_now" keeps __ticks sorted by time (invariant).
            self.__ticks.append(TrafficTick(time=_now, bytes=bytes))
            self.__total_bytes += bytes
            if self.__start_time is None:
                self.__start_time = _now
コード例 #17
0
class FeedPollerService(Service):
    """
    Polls AtomHopper feeds on a fixed interval and dispatches every new
    entry to the registered event listeners.
    """
    def __init__(self,
                 agent,
                 url,
                 event_listeners,
                 interval=DEFAULT_INTERVAL,
                 state_store=None,
                 TimerService=TimerService,
                 coiterate=coiterate):
        """
        :param agent: a :class:`twisted.web.client.Agent` to use to poll

        :param url: the url to poll

        :param event_listeners: listeners that handle a particular event
        :type event_listeners: `iterable` of `callables` that take an event
            as an argument

        :param interval: how often to poll, given in seconds - defaults to 10
        :type interval: ``int`` or ``float``

        :param state_store: where to store the current polling state
        :type state_store: :class:`otter.indexer.state.IStateStore` provider

        :param TimerService: factory (not instance) that produces something
            like a :class:`twisted.application.internet.TimerService` -
            defaults to :class:`twisted.application.internet.TimerService`
            (this parameter is mainly used for dependency injection for
            testing)
        :type TimerService: ``callable``

        :param coiterate: function that is used to coiterate tasks - defaults
            to :func:`twisted.internet.task.coiterate` - (this parameter is
            mainly used for dependency injection for testing)
        :type coiterate: ``callable``
        """
        self._url = url
        self._interval = interval

        self._timer_service = TimerService(interval, self._do_poll)

        # URL of the next page to fetch; filled in from the feed's
        # "previous" link after a successful poll (see _do_poll).
        self._next_url = None

        self._agent = agent
        self._state_store = state_store or DummyStateStore()

        self._event_listeners = event_listeners
        # Metrics timers: whole poll iteration vs. the HTTP fetch alone.
        self._poll_timer = timer('FeedPollerService.poll.{0}'.format(url))
        self._fetch_timer = timer('FeedPollerService.fetch.{0}'.format(url))

        self._coiterate = coiterate

    def startService(self):
        """
        Start the feed polling service - called by the twisted
        application when starting up

        NOTE(review): ``Service.startService`` is not invoked here, so this
        service's own ``running`` flag is never set - confirm whether that
        is intentional.
        """
        self._timer_service.startService()

    def stopService(self):
        """
        Stop the feed polling service - called by the twisted
        application when shutting down

        :return: ``Deferred``
        """
        return self._timer_service.stopService()

    def _fetch(self, url):
        """
        Get atom feed from AtomHopper url

        :return: ``Deferred`` firing with the parsed feed.
        """
        def _parse(data):
            # Parse the raw response body into a feed structure.
            e = parse(data)
            return e

        def _gotResponse(resp):
            # Stream the response body; br.finish fires with the full body.
            br = _BodyReceiver()

            resp.deliverBody(br)

            return br.finish

        log.msg(format="Fetching url: %(url)r", url=url)
        d = self._agent.request('GET', url, Headers({}), None)
        d.addCallback(_gotResponse)
        d.addCallback(_parse)

        return d

    def _do_poll(self):
        """
        Do one iteration of polling AtomHopper.

        :return: ``Deferred`` firing when the iteration has finished
            (errors are logged, not propagated).
        """
        start = time.time()

        def _get_next_url(feed):
            self._fetch_timer.update(time.time() - start)
            # next is previous, because AtomHopper is backwards in time
            next_url = previous_link(feed)

            if next_url is not None:
                self._next_url = next_url

            log.msg(format="URLS: %(url)r\n\t->%(next_url)s",
                    url=self._url,
                    next_url=self._next_url)

            # Persist the polling position before dispatching events.
            sd = self._state_store.save_state(self._next_url)
            sd.addCallback(lambda _: feed)
            return sd

        def _dispatch_entries(feed):
            # Actually sort by updated date.
            sorted_entries = sorted(entries(feed),
                                    key=lambda x: parse_date(updated(x)))
            # Invoke every listener for every entry, cooperatively.
            return self._coiterate(
                chain.from_iterable(((el(entry)
                                      for el in self._event_listeners)
                                     for entry in sorted_entries)))

        def _finish_iteration(ignore):
            self._poll_timer.update(time.time() - start)

        d = self._state_store.get_state()
        d.addCallback(
            lambda saved_url: self._next_url or saved_url or self._url)
        d.addCallback(self._fetch)
        d.addCallback(_get_next_url)
        d.addCallback(_dispatch_entries)
        # Log any failure from the chain; _finish_iteration runs either way.
        d.addErrback(log.err)
        d.addBoth(_finish_iteration)
        return d
コード例 #18
0
ファイル: backup_scheduler.py プロジェクト: shvar/redfs
class BackupScheduler(object):
    """The container for the backup schedules.

    Internally, it keeps the list of schedules in always-sorted state;
    they are sorted by the expected time of fire (then by UUID;
    but two schedules with the same UUID always match).

    @todo: The scheduler must be dynamically updated:
           1. if some "backup scheduled" settings are changed;
           2. if some "timezone" setting is changed.

    @note: schedule support is disabled, so this class is not used anymore.
    """
    __slots__ = ('server_process', 'lock',
                 '__schedules', '__schedules_by_host_uuid',
                 '__schedule_check_timer', '__last_reread_from_db')


    @exceptions_logged(logger)
    def __init__(self, server_process):
        """
        @type server_process: ServerProcess
        """
        assert isinstance(server_process, ServerProcess), \
               repr(server_process)

        self.server_process = server_process
        self.lock = RLock()

        # Created here; started (if ever) via start().
        self.__schedule_check_timer = \
            TimerService(BACKUP_SCHEDULES_CHECK_PERIOD.total_seconds(),
                         self.__on_schedule_check_timer)

        self.reread_cache()


    def __repr__(self):
        return u'<BackupScheduler: {} schedule(s)>'\
                   .format(len(self.__schedules))


    @property
    def app(self):
        """
        @rtype: NodeApp
        """
        return self.server_process.app


    @staticmethod
    def _tz_info_for_host(tz_name, host_uuid):
        """Map a stored timezone name to a tzinfo, falling back to UTC.

        @param tz_name: the timezone name from the settings
            (may be C{None} or empty).
        @param host_uuid: the UUID of the host; used for logging only.

        @rtype: tzinfo
        """
        if tz_name:
            try:
                return pytz.timezone(tz_name)
            # Deliberately narrowed from a bare "except:", which would
            # also swallow SystemExit/KeyboardInterrupt.
            except Exception:
                logger.error('Cannot parse timezone %r for host %r, '
                                 'fallback to UTC',
                             tz_name, host_uuid)
        return pytz.utc


    @exceptions_logged(logger)
    def __reset(self):
        """Drop all cached schedules."""
        self.__schedules = []
        self.__schedules_by_host_uuid = defaultdict(set)


    @exceptions_logged(logger)
    def start(self):
        """Start the periodic schedule check."""
        self.__schedule_check_timer.startService()


    @exceptions_logged(logger)
    def stop(self):
        """
        @todo: It is nowhere stopped at the moment, should it be?
        """
        self.__schedule_check_timer.stopService()


    @contract_epydoc
    def add(self, schedule):
        """Insert a schedule, keeping the list sorted.

        @type schedule: BackupSchedule
        """
        with self.lock:
            assert schedule not in self.__schedules, repr(schedule)
            bisect.insort(self.__schedules, schedule)
            self.__schedules_by_host_uuid[schedule.host_uuid].add(schedule)
            assert self.__schedules == sorted(self.__schedules), 'Not sorted!'


    @contract_epydoc
    def remove(self, schedule):
        """Remove a previously added schedule.

        @type schedule: BackupSchedule
        """
        with self.lock:
            assert schedule in self.__schedules, repr(schedule)
            # bisect.bisect returns the index just past the matching item.
            index = bisect.bisect(self.__schedules, schedule) - 1
            assert schedule == self.__schedules[index], \
                   (index, schedule, self.__schedules)
            del self.__schedules[index]
            self.__schedules_by_host_uuid[schedule.host_uuid].remove(schedule)
            assert self.__schedules == sorted(self.__schedules), 'Not sorted!'


    @contract_epydoc
    def get_schedules_by_host_uuid(self, host_uuid):
        """
        @type host_uuid: UUID
        @rtype: frozenset
        """
        return frozenset(self.__schedules_by_host_uuid[host_uuid])


    @contract_epydoc
    def get_schedules_older_than(self, dt):
        """
        @param dt: The time, the schedules older than, are returned.
        @type dt: datetime

        @precondition: not is_naive_dt(dt) # repr(dt)

        @returns: The schedules which are older than the "dt".
        @rtype: list
        @postcondition: consists_of(result, BackupSchedule)
        """
        with self.lock:
            # The list is sorted by fire time, so a bisect against a dummy
            # schedule at "dt" yields the prefix of expired schedules.
            i = bisect.bisect(self.__schedules,
                              BackupSchedule._dummy_schedule_for_time(dt))
            return self.__schedules[:i]


    @contract_epydoc
    def reread_cache_for_host(self, host_uuid):
        """
        Reread all the schedules for a single host from the DB.
        It is assumed they've just been updated, so they should not be
        sent back.

        @param host_uuid: UUID of the host which schedules need to be updated.
        @type host_uuid: UUID
        """
        logger.debug('Rereading the schedules on %r for host %r',
                     self.server_process.me, host_uuid)

        with self.lock:
            old_schedules = self.get_schedules_by_host_uuid(host_uuid)
            logger.debug('Removing schedules for host %s: %r',
                         host_uuid, old_schedules)
            for schedule in old_schedules:
                self.remove(schedule)

            with db.RDB() as rdbw:
                new_schedules = \
                    Queries.Settings.get_setting(
                        host_uuid=host_uuid,
                        setting_name=Queries.Settings.BACKUP_SCHEDULE,
                        rdbw=rdbw)

                tz_name = Queries.Settings.get_setting(
                              host_uuid=host_uuid,
                              setting_name=Queries.Settings.TIMEZONE,
                              rdbw=rdbw)

            tz_info = self._tz_info_for_host(tz_name, host_uuid)

            logger.debug('Adding new schedules for host %s: %r',
                         host_uuid, new_schedules)
            for schedule in new_schedules:
                self.add(BackupSchedule.from_dict(host_uuid,
                                                  tz_info,
                                                  schedule))


    def reread_cache(self):
        """
        Reread the cache/info from the database.
        """
        self.__last_reread_from_db = utcnow()
        logger.debug('Rereading the schedules on %r', self.server_process.me)

        with self.lock:
            self.__reset()
            for host_uuid, (schedules, tz_name) \
                    in TrustedQueries.TrustedSettings.get_all_schedules() \
                                                     .iteritems():
                tz_info = self._tz_info_for_host(tz_name, host_uuid)

                for schedule in schedules:
                    self.add(BackupSchedule.from_dict(host_uuid,
                                                      tz_info,
                                                      schedule))

        logger.debug('Reread %i schedules on %r',
                     len(self.__schedules), self.server_process.me)


    @exceptions_logged(logger)
    def __on_schedule_check_timer(self):
        """
        Called whenever the schedule check timer is fired.

        Intentionally a no-op: schedule support is disabled (see the class
        note); __check_schedules() below is currently not invoked from here.
        """
        pass


    @exceptions_logged(logger)
    def __check_schedules(self):
        """Fire the backups for every schedule whose time has come.

        Also rereads the schedule cache from the DB if it is stale.
        """
        _now = utcnow()

        with self.lock:
            #
            # First, do we need to reread the schedules from the DB.
            #
            assert isinstance(self.__last_reread_from_db, datetime), \
                   repr(self.__last_reread_from_db)
            maxdelta = timedelta(seconds=BACKUP_SCHEDULES_REREAD_PERIOD
                                             .total_seconds())
            if _now - self.__last_reread_from_db > maxdelta:
                self.reread_cache()

            #
            # Now, we can check the schedules.
            #
            if self.__schedules:
                logger.debug('Checking for suspect schedules '
                                 'at %s among %i schedules...',
                             _now, len(self.__schedules))
            suspect_schedules = self.get_schedules_older_than(_now)
            if suspect_schedules:
                logger.debug('On node %r, the following (%i) schedules '
                                 'have passed their time:\n%s',
                             self.server_process.me, len(suspect_schedules),
                             '\n'.join(repr(s)
                                           for s in suspect_schedules))

                # But what hosts are actually alive at the moment?
                alive_host_uuids = \
                    [h.uuid
                         for h in self.app.known_hosts.alive_peers()]
                if alive_host_uuids:
                    logger.debug('Alive hosts at the moment are: %r',
                                 alive_host_uuids)
                # The schedule will fire a backup only if both its time is out,
                # and its host is alive.
                process_schedules = {sch
                                         for sch in suspect_schedules
                                         if sch.host_uuid in alive_host_uuids}

                if process_schedules:
                    processed_host_uuids = set()
                    logger.debug('The following (%i) schedules will fire:\n%s',
                                 len(suspect_schedules),
                                 '\n'.join(repr(sch)
                                               for sch in process_schedules))

                    # Loop over schedules, and run the backup transactions.
                    for schedule in process_schedules:
                        logger.debug('Firing a backup for schedule %r',
                                         schedule)

                        # TODO: Add "if user is suspended" check when
                        # TODO: BackupScheduler will be uncommented.
                        raise NotImplementedError
                        self.start_scheduled_backup(schedule)

                        new_schedule = schedule.copy()
                        new_schedule.advance_by_period()
                        logger.debug('%r advanced to %r',
                                     schedule, new_schedule)

                        # Remove old schedule; add new if needed.
                        self.remove(schedule)
                        if new_schedule.next_backup_datetime is not None:
                            self.add(new_schedule)

                        processed_host_uuids.add(new_schedule.host_uuid)

                    # We've done with the backup transactions.
                    # Would be cool to update the settings.
                    for host_uuid in processed_host_uuids:
                        schedules = self.get_schedules_by_host_uuid(
                                        host_uuid)
                        logger.debug('Updating the schedules '
                                         'for host %s:\n%r',
                                     host_uuid, schedules)
                        with db.RDB() as rdbw:
                            TrustedQueries.TrustedSettings.set_setting(
                                rdbw=rdbw,
                                host_uuid=host_uuid,
                                setting_name=Queries.Settings.BACKUP_SCHEDULE,
                                setting_value=[s.to_dict()
                                                   for s in schedules],
                                setting_time=_now.replace(tzinfo=None))
                        self.send_updated_schedules_to_host(host_uuid)


    def start_scheduled_backup(self, schedule):
        """Given a schedule, start an appropriate backup transaction."""
        assert isinstance(schedule, BackupSchedule), repr(schedule)

        _manager = self.server_process.tr_manager

        # The created transaction object is not used here.
        _manager.create_new_transaction(
            name='BACKUP',
            src=self.server_process.me,
            dst=self.app.known_hosts[schedule.host_uuid],
            parent=None,
            # BACKUP-specific
            schedule_uuid=schedule.uuid,
            schedule_name=schedule.name,
            schedule_paths=schedule.paths)


    @contract_epydoc
    def send_updated_schedules_to_host(self, host_uuid):
        """
        Given an UUID of the host,
        start an appropriate "UPDATE_CONFIGURATION" transaction.

        @type host_uuid: UUID
        """
        _setting_name = Queries.Settings.BACKUP_SCHEDULE

        with db.RDB() as rdbw:
            settings = {_setting_name: Queries.Settings.get_setting(
                                           host_uuid=host_uuid,
                                           setting_name=_setting_name,
                                           direct=True,
                                           with_time=True,
                                           rdbw=rdbw)}

        # The created transaction object is not used here.
        self.server_process.tr_manager.create_new_transaction(
            name='UPDATE_CONFIGURATION',
            src=self.server_process.me,
            dst=self.app.known_hosts[host_uuid],
            parent=None,
            # UPDATE_CONFIGURATION-specific
            settings=settings)
コード例 #19
0
class MetricsService(Service, object):
    """
    Service that runs a metrics-collection pass on a fixed schedule.
    """
    def __init__(self, reactor, config, log, clock=None, collect=None):
        """
        Connect to Cassandra and prepare the internal timer that will
        periodically invoke :meth:`collect`.

        :param reactor: Twisted reactor for connection purposes
        :param dict config: All the config necessary to run the service.
            Comes from config file
        :param IReactorTime clock: Optional reactor for timer purpose
        """
        self.log = log
        self.reactor = reactor
        self._client = connect_cass_servers(reactor, config['cassandra'])
        self._divergent_groups = {}
        self.divergent_timeout = get_in(
            ['metrics', 'divergent_timeout'], config, 3600)
        authenticator = generate_authenticator(reactor, config['identity'])
        interval = get_in(['metrics', 'interval'], config, default=60)
        self._service = TimerService(interval,
                                     collect or self.collect,
                                     reactor,
                                     config,
                                     self.log,
                                     client=self._client,
                                     authenticator=authenticator)
        self._service.clock = clock or reactor

    @defer.inlineCallbacks
    def collect(self, *a, **k):
        """
        Run a single metrics-collection pass; log (never raise) errors.

        Groups that remain diverged and unchanged beyond
        ``divergent_timeout`` seconds are reported via ``log.err``.
        """
        try:
            metrics = yield collect_metrics(*a, **k)
            self._divergent_groups, stale_groups = unchanged_divergent_groups(
                self.reactor, self._divergent_groups, self.divergent_timeout,
                metrics)
            for group, seconds_diverged in stale_groups:
                self.log.err(
                    ValueError(""),  # Need to give an exception to log err
                    ("Group {group_id} of {tenant_id} remains diverged "
                     "and unchanged for {divergent_time}"),
                    tenant_id=group.tenant_id,
                    group_id=group.group_id,
                    desired=group.desired,
                    actual=group.actual,
                    pending=group.pending,
                    divergent_time=str(timedelta(seconds=seconds_diverged)))
        except Exception:
            self.log.err(None, "Error collecting metrics")

    def startService(self):
        """
        Mark the service running, then start the internal timer.
        """
        super(MetricsService, self).startService()
        return self._service.startService()

    def stopService(self):
        """
        Stop the internal timer, then disconnect the Cassandra client.

        :return: ``Deferred`` firing once the client has disconnected.
        """
        super(MetricsService, self).stopService()
        stopping = self._service.stopService()
        stopping.addCallback(lambda _ignored: self._client.disconnect())
        return stopping
コード例 #20
0
class TimerServiceTests(TestCase):
    """
    Tests for L{twisted.application.internet.TimerService}.

    @type timer: L{TimerService}
    @ivar timer: the service under test

    @type clock: L{task.Clock}
    @ivar clock: deterministic time source driving C{timer}

    @type deferred: L{Deferred}
    @ivar deferred: deferred returned by L{TimerServiceTests.call}.
    """

    def setUp(self):
        """
        Build a two-second L{TimerService} driven by a manual clock.
        """
        self.deferred = Deferred()
        self.timer = TimerService(2, self.call)
        self.clock = self.timer.clock = task.Clock()

    def call(self):
        """
        The target function handed to the L{TimerService} under test.

        @returns: C{self.deferred}
        @rtype: L{Deferred}
        """
        return self.deferred

    def test_startService(self):
        """
        L{TimerService.startService} marks the service as running, and
        creates and starts a L{task.LoopingCall} on the test clock.
        """
        self.timer.startService()
        loop = self.timer._loop
        self.assertTrue(self.timer.running, "Service is started")
        self.assertIsInstance(loop, task.LoopingCall)
        self.assertIdentical(self.clock, loop.clock)
        self.assertTrue(loop.running, "LoopingCall is started")

    def test_startServiceRunsCallImmediately(self):
        """
        Starting the service invokes the target function right away.
        """
        recorded = []
        self.timer.call = (recorded.append, (None,), {})
        self.timer.startService()
        self.assertEqual([None], recorded)

    def test_startServiceUsesGlobalReactor(self):
        """
        The reactor chosen by L{internet._maybeGlobalReactor} is the one
        handed to the L{task.LoopingCall}.
        """
        substitute = task.Clock()
        self.patch(internet, "_maybeGlobalReactor",
                   lambda maybeReactor: substitute)
        self.timer.startService()
        self.assertIdentical(substitute, self.timer._loop.clock)

    def test_stopServiceWaits(self):
        """
        While a call is in progress, the L{Deferred} returned by
        L{TimerService.stopService} fires only once that call finishes.
        """
        self.timer.startService()
        stopping = self.timer.stopService()
        self.assertNoResult(stopping)
        self.assertEqual(True, self.timer.running)
        self.deferred.callback(object())
        self.assertIdentical(self.successResultOf(stopping), None)

    def test_stopServiceImmediately(self):
        """
        When no call is in progress, the L{Deferred} returned by
        L{TimerService.stopService} has already fired.
        """
        self.timer.startService()
        self.deferred.callback(object())
        stopping = self.timer.stopService()
        self.assertIdentical(self.successResultOf(stopping), None)

    def test_failedCallLogsError(self):
        """
        A deferred errback from the target function is logged, and
        L{TimerService.stopService} still succeeds afterwards.
        """
        self.timer.startService()
        self.deferred.errback(Failure(ZeroDivisionError()))
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(1, len(logged))
        stopping = self.timer.stopService()
        self.assertIdentical(self.successResultOf(stopping), None)

    def test_pickleTimerServiceNotPickleLoop(self):
        """
        Pickling a started L{internet.TimerService} omits the C{_loop}
        attribute.
        """
        # A module-level function is needed so the service is pickleable;
        # self.timer's bound-method target is not.
        service = TimerService(1, fakeTargetFunction)
        service.startService()
        pickled = pickle.dumps(service)
        service.stopService()
        restored = pickle.loads(pickled)
        marker = object()
        self.assertIdentical(marker, getattr(restored, "_loop", marker))

    def test_pickleTimerServiceNotPickleLoopFinished(self):
        """
        Pickling a started L{internet.TimerService} omits the
        C{_loopFinished} attribute.
        """
        # A module-level function is needed so the service is pickleable;
        # self.timer's bound-method target is not.
        service = TimerService(1, fakeTargetFunction)
        service.startService()
        pickled = pickle.dumps(service)
        service.stopService()
        restored = pickle.loads(pickled)
        marker = object()
        self.assertIdentical(marker,
                             getattr(restored, "_loopFinished", marker))
コード例 #21
0
ファイル: timer.py プロジェクト: istobran/eaEmu
 def stopService(self):
    ## Work around a TimerService bug: stopping a service that was never
    ## started fails, since "_loop" only exists after startService
    ## (condition "not hasattr(self, '_loop')").
    if not self.running:
       return None
    return TimerService.stopService(self)
コード例 #22
0
ファイル: service.py プロジェクト: mithrandi/txacme
class AcmeIssuingService(Service):
    """
    A service for keeping certificates up to date by using an ACME server.

    :type cert_store: `~txacme.interfaces.ICertificateStore`
    :param cert_store: The certificate store containing the certificates to
        manage.

    :type client_creator: Callable[[], Deferred[`txacme.client.Client`]]
    :param client_creator: A callable called with no arguments for creating the
        ACME client.  For example, ``partial(Client.from_url, reactor=reactor,
        url=LETSENCRYPT_STAGING_DIRECTORY, key=acme_key, alg=RS256)``.
    :param clock: ``IReactorTime`` provider; usually the reactor, when not
        testing.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: Challenge responders.  Usually only one responder is
        needed; if more than one responder for the same type is provided, only
        the first will be used.
    :param str email: An (optional) email address to use during registration.
    :param ~datetime.timedelta check_interval: How often to check for expiring
        certificates.
    :param ~datetime.timedelta reissue_interval: If a certificate is expiring
        in less time than this interval, it will be reissued.
    :param ~datetime.timedelta panic_interval: If a certificate is expiring in
        less time than this interval, and reissuing fails, the panic callback
        will be invoked.

    :type panic: Callable[[Failure, `str`], Deferred]
    :param panic: A callable invoked with the failure and server name when
        reissuing fails for a certificate expiring in the ``panic_interval``.
        For example, you could generate a monitoring alert.  The default
        callback logs a message at *CRITICAL* level.
    :param generate_key: A 0-arg callable used to generate a private key for a
        new cert.  Normally you would not pass this unless you have specialized
        key generation requirements.
    """
    cert_store = attr.ib()
    _client_creator = attr.ib()
    _clock = attr.ib()
    _responders = attr.ib()
    _email = attr.ib(default=None)
    check_interval = attr.ib(default=timedelta(days=1))
    reissue_interval = attr.ib(default=timedelta(days=30))
    panic_interval = attr.ib(default=timedelta(days=15))
    _panic = attr.ib(default=_default_panic)
    _generate_key = attr.ib(default=partial(generate_private_key, u'rsa'))

    # Deferreds handed out by when_certs_valid(), fired once the initial
    # check (including panic reissues) completes.
    _waiting = attr.ib(default=attr.Factory(list), init=False)
    # Maps server_name -> (issuing Deferred, [waiting Deferreds]) for
    # issuances currently in progress, to dedupe concurrent requests.
    _issuing = attr.ib(default=attr.Factory(dict), init=False)
    # True once the startup check has completed at least once.
    ready = False

    def _now(self):
        """
        Get the current time.
        """
        return clock_now(self._clock)

    def _check_certs(self):
        """
        Check all of the certs in the store, and reissue any that are expired
        or close to expiring.
        """
        log.info('Starting scheduled check for expired certificates.')

        def check(certs):
            panicing = set()
            expiring = set()
            for server_name, objects in certs.items():
                # A name with no stored objects has no cert at all; treat it
                # as overdue.
                if len(objects) == 0:
                    panicing.add(server_name)
                for o in filter(lambda o: isinstance(o, Certificate), objects):
                    cert = x509.load_pem_x509_certificate(
                        o.as_bytes(), default_backend())
                    until_expiry = cert.not_valid_after - self._now()
                    if until_expiry <= self.panic_interval:
                        panicing.add(server_name)
                    elif until_expiry <= self.reissue_interval:
                        expiring.add(server_name)

            log.info(
                'Found {panicing_count:d} overdue / expired and '
                '{expiring_count:d} expiring certificates.',
                panicing_count=len(panicing),
                expiring_count=len(expiring))

            d1 = (
                gatherResults(
                    [self._with_client(self._issue_cert, server_name)
                     .addErrback(self._panic, server_name)
                     for server_name in panicing],
                    consumeErrors=True)
                .addCallback(done_panicing))
            d2 = gatherResults(
                [self.issue_cert(server_name)
                 .addErrback(
                     # Bind server_name as a default argument: a plain
                     # closure would late-bind to the comprehension variable
                     # and log the *last* iterated name for every failure
                     # that fires asynchronously.
                     lambda f, server_name=server_name: log.failure(
                         u'Error issuing certificate for: {server_name!r}',
                         f, server_name=server_name))
                 for server_name in expiring],
                consumeErrors=True)
            return gatherResults([d1, d2], consumeErrors=True)

        def done_panicing(ignored):
            # Startup reissue attempts are done; release all waiters.
            self.ready = True
            for d in list(self._waiting):
                d.callback(None)
            self._waiting = []

        return (
            self._ensure_registered()
            .addCallback(lambda _: self.cert_store.as_dict())
            .addCallback(check)
            .addErrback(
                lambda f: log.failure(
                    u'Error in scheduled certificate check.', f)))

    def issue_cert(self, server_name):
        """
        Issue a new cert for a particular name.

        If an existing cert exists, it will be replaced with the new cert.  If
        issuing is already in progress for the given name, a second issuing
        process will *not* be started.

        :param str server_name: The name to issue a cert for.

        :rtype: ``Deferred``
        :return: A deferred that fires when issuing is complete.
        """
        def finish(result):
            # Pop first so a re-request during the callbacks starts fresh.
            _, waiting = self._issuing.pop(server_name)
            for d in waiting:
                d.callback(result)

        # d_issue is assigned below, in the conditional, since we may be
        # creating it or using the existing one.
        d = Deferred(lambda _: d_issue.cancel())
        if server_name in self._issuing:
            d_issue, waiting = self._issuing[server_name]
            waiting.append(d)
        else:
            d_issue = self._with_client(self._issue_cert, server_name)
            waiting = [d]
            self._issuing[server_name] = (d_issue, waiting)
            # Add the callback afterwards in case we're using a client
            # implementation that isn't actually async
            d_issue.addBoth(finish)
        return d

    def _with_client(self, f, *a, **kw):
        """
        Construct a client, and perform an operation with it.
        """
        return self._client_creator().addCallback(f, *a, **kw)

    def _issue_cert(self, client, server_name):
        """
        Issue a new cert for a particular name.

        Requests a challenge, answers it, polls until valid, then requests
        issuance and stores the key, cert, and chain in the cert store.
        """
        log.info(
            'Requesting a certificate for {server_name!r}.',
            server_name=server_name)
        key = self._generate_key()
        objects = [
            Key(key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()))]

        def answer_and_poll(authzr):
            def got_challenge(stop_responding):
                # Always stop responding to the challenge, success or not.
                return (
                    poll_until_valid(authzr, self._clock, client)
                    .addBoth(tap(lambda _: stop_responding())))
            return (
                answer_challenge(authzr, client, self._responders)
                .addCallback(got_challenge))

        def got_cert(certr):
            # Re-encode the DER cert as PEM for storage.
            objects.append(
                Certificate(
                    x509.load_der_x509_certificate(
                        certr.body, default_backend())
                    .public_bytes(serialization.Encoding.PEM)))
            return certr

        def got_chain(chain):
            for certr in chain:
                got_cert(certr)
            log.info(
                'Received certificate for {server_name!r}.',
                server_name=server_name)
            return objects

        return (
            client.request_challenges(fqdn_identifier(server_name))
            .addCallback(answer_and_poll)
            .addCallback(lambda ign: client.request_issuance(
                CertificateRequest(
                    csr=csr_for_names([server_name], key))))
            .addCallback(got_cert)
            .addCallback(client.fetch_chain)
            .addCallback(got_chain)
            .addCallback(partial(self.cert_store.store, server_name)))

    def _ensure_registered(self):
        """
        Register if needed.
        """
        if self._registered:
            return succeed(None)
        else:
            return self._with_client(self._register)

    def _register(self, client):
        """
        Register and agree to the TOS.
        """
        def _registered(regr):
            self._regr = regr
            self._registered = True
        regr = messages.NewRegistration.from_data(email=self._email)
        return (
            client.register(regr)
            .addCallback(client.agree_to_tos)
            .addCallback(_registered))

    def when_certs_valid(self):
        """
        Get a notification once the startup check has completed.

        When the service starts, an initial check is made immediately; the
        deferred returned by this function will only fire once reissue has been
        attempted for any certificates within the panic interval.

        ..  note:: The reissue for any of these certificates may not have been
            successful; the panic callback will be invoked for any certificates
            in the panic interval that failed reissue.

        :rtype: ``Deferred``
        :return: A deferred that fires once the initial check has resolved.
        """
        if self.ready:
            return succeed(None)
        d = Deferred()
        self._waiting.append(d)
        return d

    def startService(self):
        Service.startService(self)
        self._registered = False
        # TimerService fires immediately on start, giving us the initial
        # check, then repeats every check_interval.
        self._timer_service = TimerService(
            self.check_interval.total_seconds(), self._check_certs)
        self._timer_service.clock = self._clock
        self._timer_service.startService()

    def stopService(self):
        Service.stopService(self)
        self.ready = False
        self._registered = False
        # Cancel outstanding when_certs_valid() waiters.
        for d in list(self._waiting):
            d.cancel()
        self._waiting = []
        return self._timer_service.stopService()
コード例 #23
0
ファイル: poller.py プロジェクト: MariaAbrahms/otter
class FeedPollerService(Service):
    """
    Polls AtomHopper feeds
    """
    def __init__(self, agent, url, event_listeners, interval=DEFAULT_INTERVAL,
                 state_store=None,
                 TimerService=TimerService, coiterate=coiterate):
        """
        :param agent: a :class:`twisted.web.client.Agent` to use to poll

        :param url: the url to poll

        :param event_listeners: listeners that handle a particular event
        :type event_listeners: `iterable` of `callables` that take an event
            as an argument

        :param interval: how often to poll, given in seconds - defaults to 10
        :type interval: ``int`` or ``float``

        :param state_store: where to store the current polling state
        :type state_store: :class:`otter.indexer.state.IStateStore` provider

        :param TimerService: factory (not instance) that produces something
            like a :class:`twisted.application.internet.TimerService` -
            defaults to :class:`twisted.application.internet.TimerService`
            (this parameter is mainly used for dependency injection for
            testing)
        :type TimerService: ``callable``

        :param coiterate: function that is used to coiterate tasks - defaults
            to :func:`twisted.internet.task.coiterate` - (this parameter is
            mainly used for dependency injection for testing)
        :type coiterate: ``callable``
        """
        self._url = url
        self._interval = interval
        self._agent = agent
        self._event_listeners = event_listeners
        self._state_store = state_store or DummyStateStore()
        self._coiterate = coiterate

        # URL of the next page to fetch; None until we learn it from a feed.
        self._next_url = None

        self._timer_service = TimerService(interval, self._do_poll)

        self._poll_timer = timer('FeedPollerService.poll.{0}'.format(url))
        self._fetch_timer = timer('FeedPollerService.fetch.{0}'.format(url))

    def startService(self):
        """
        Start the feed polling service - called by the twisted
        application when starting up
        """
        self._timer_service.startService()

    def stopService(self):
        """
        Stop the feed polling service - called by the twisted
        application when shutting down

        :return: ``Deferred``
        """
        return self._timer_service.stopService()

    def _fetch(self, url):
        """
        Get atom feed from AtomHopper url
        """
        def read_body(response):
            receiver = _BodyReceiver()
            response.deliverBody(receiver)
            return receiver.finish

        log.msg(format="Fetching url: %(url)r", url=url)
        return (
            self._agent.request('GET', url, Headers({}), None)
            .addCallback(read_body)
            .addCallback(parse))

    def _do_poll(self):
        """
        Do one interation of polling AtomHopper.
        """
        start = time.time()

        def record_next_url(feed):
            self._fetch_timer.update(time.time() - start)
            # next is previous, because AtomHopper is backwards in time
            candidate = previous_link(feed)
            if candidate is not None:
                self._next_url = candidate

            log.msg(format="URLS: %(url)r\n\t->%(next_url)s",
                    url=self._url, next_url=self._next_url)

            return (
                self._state_store.save_state(self._next_url)
                .addCallback(lambda _: feed))

        def dispatch_entries(feed):
            # Process entries in chronological order of their update time.
            by_date = sorted(entries(feed),
                             key=lambda entry: parse_date(updated(entry)))
            work = chain.from_iterable(
                (listener(entry) for listener in self._event_listeners)
                for entry in by_date)
            return self._coiterate(work)

        def record_poll_time(ignore):
            self._poll_timer.update(time.time() - start)

        d = self._state_store.get_state()
        d.addCallback(
            lambda saved_url: self._next_url or saved_url or self._url)
        d.addCallback(self._fetch)
        d.addCallback(record_next_url)
        d.addCallback(dispatch_entries)
        d.addErrback(log.err)
        d.addBoth(record_poll_time)
        return d
コード例 #24
0
class RestoreRequestProcessor(object):
    """
    The processor for the externally initiated restore requests.
    """
    __slots__ = ('__server_process', '__restore_timer')


    def __init__(self, server_process):
        assert isinstance(server_process, ServerProcess), \
               repr(server_process)
        self.__server_process = server_process

        # Poll for restore requests periodically, on a worker thread, with
        # any exceptions routed to the module logger.
        self.__restore_timer = \
            TimerService(WEB_RESTORE_PERIOD.total_seconds(),
                         exceptions_logged(logger)(callInThread),
                         self.__poll_restore_requests_in_thread)


    def __poll_restore_requests_in_thread(self):
        """Perform another iteration of polling the restore requests."""
        assert not in_main_thread()

        poll_uuid = gen_uuid()
        logger.debug('Polling restore requests (%s)', poll_uuid)

        # Drain the queue: keep claiming the oldest pending request until
        # there are none left.
        while True:
            with ds.FDB() as fdbw:
                req = (FDBQueries.RestoreRequests
                                 .atomic_start_oldest_restore_request(
                                      fdbw=fdbw))

            logger.debug('Poll (%s) returned %r', poll_uuid, req)
            if req is None:
                break

            # Create new "virtual" dataset with all the data
            # to be restored.
            with db.RDB() as rdbw:
                cloned_ds_uuid = \
                    Queries.Datasets.restore_files_to_dataset_clone(
                        req.base_ds_uuid,
                        req.paths,
                        req.ts_start,
                        rdbw)

            # Now we know the new dataset to be restored.
            # Btw, write it into the docstore.
            # Doesn't need to be atomic, as only a single node
            # may be processing it at a time.
            with ds.FDB() as fdbw:
                FDBQueries.RestoreRequests.set_ds_uuid(
                    _id=req._id,
                    new_ds_uuid=cloned_ds_uuid,
                    fdbw=fdbw)

            # After creating the dataset, let's restore it to all host
            # which are alive.
            syncer = self.__server_process.app.syncer
            syncer.restore_dataset_to_lacking_hosts(
                me=self.__server_process.me,
                host=None,
                ds_uuid=cloned_ds_uuid)

        logger.debug('Polling restore requests (%s) - done', poll_uuid)


    def start(self):
        """Start the processor."""
        assert in_main_thread()
        self.__restore_timer.startService()


    def stop(self):
        """Stop the processor."""
        assert in_main_thread()
        self.__restore_timer.stopService()
コード例 #25
0
class RestoreRequestProcessor(object):
    """
    The processor for the externally initiated restore requests.
    """
    __slots__ = ('__server_process', '__restore_timer')

    def __init__(self, server_process):
        assert isinstance(server_process, ServerProcess), \
               repr(server_process)
        self.__server_process = server_process

        # Run the poll loop periodically on a worker thread; exceptions are
        # logged rather than silently killing the timer.
        self.__restore_timer = \
            TimerService(WEB_RESTORE_PERIOD.total_seconds(),
                         exceptions_logged(logger)(callInThread),
                         self.__poll_restore_requests_in_thread)

    def __poll_restore_requests_in_thread(self):
        """Perform another iteration of polling the restore requests."""
        assert not in_main_thread()

        poll_uuid = gen_uuid()
        logger.debug('Polling restore requests (%s)', poll_uuid)

        # Claim and process pending requests, oldest first, until the
        # queue is empty.
        while True:
            with ds.FDB() as fdbw:
                request = (FDBQueries.RestoreRequests
                                     .atomic_start_oldest_restore_request(
                                          fdbw=fdbw))

            logger.debug('Poll (%s) returned %r', poll_uuid, request)
            if request is None:
                break

            # Create new "virtual" dataset with all the data
            # to be restored.
            with db.RDB() as rdbw:
                new_ds_uuid = \
                    Queries.Datasets.restore_files_to_dataset_clone(
                        request.base_ds_uuid,
                        request.paths,
                        request.ts_start,
                        rdbw)

            # Now we know the new dataset to be restored.
            # Btw, write it into the docstore.
            # Doesn't need to be atomic, as only a single node
            # may be processing it at a time.
            with ds.FDB() as fdbw:
                FDBQueries.RestoreRequests.set_ds_uuid(
                    _id=request._id,
                    new_ds_uuid=new_ds_uuid,
                    fdbw=fdbw)

            # After creating the dataset, let's restore it to all host
            # which are alive.
            syncer = self.__server_process.app.syncer
            syncer.restore_dataset_to_lacking_hosts(
                me=self.__server_process.me,
                host=None,
                ds_uuid=new_ds_uuid)

        logger.debug('Polling restore requests (%s) - done', poll_uuid)

    def start(self):
        """Start the processor."""
        assert in_main_thread()
        self.__restore_timer.startService()

    def stop(self):
        """Stop the processor."""
        assert in_main_thread()
        self.__restore_timer.stopService()