Example 1
    def brokerAttached(self, tubref, broker, isClient):
        assert self.running
        assert tubref

        if tubref in self.tubConnectors:
            # we initiated an outbound connection to this tubref
            if not isClient:
                # however, the connection we got was from an inbound
                # connection. The completed (inbound) connection wins, so
                # abandon the outbound TubConnector
                self.tubConnectors[tubref].shutdown()

            # we don't need the TubConnector any more
            del self.tubConnectors[tubref]

        if tubref in self.brokers:
            # this shouldn't happen: acceptDecision is supposed to drop any
            # existing old connection first.
            self.log("ERROR: unexpected duplicate connection from %s" % tubref)
            raise BananaError("unexpected duplicate connection")
        self.brokers[tubref] = broker

        # now inform everyone who's been waiting on it
        if tubref in self.waitingForBrokers:
            for d in self.waitingForBrokers[tubref]:
                eventual.eventually(d.callback, broker)
            del self.waitingForBrokers[tubref]
Example 2
 def _sendOnly(self, methname, args, kwargs):
     """Send a message like _send, but discard the result."""
     # this is called by sendOnly()
     if self._state in (EVENTUAL, CHAINED):
         self._pendingMethods.append((methname, args, kwargs, _ignore))
     else:
         eventually(self._deliver, methname, args, kwargs, _ignore)
Example 3
 def notifyOnDisconnect(self, callback, *args, **kwargs):
     marker = (callback, args, kwargs)
     if self.disconnected:
         eventually(callback, *args, **kwargs)
     else:
         self.disconnectWatchers.append(marker)
     return marker
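A usage sketch for the watcher API above (hedged: `broker` and `log_loss` are
hypothetical names; foolscap's brokers and RemoteReferences pair this method
with dontNotifyOnDisconnect() to unregister a watcher):

    def log_loss(name):
        print("lost connection to", name)

    marker = broker.notifyOnDisconnect(log_loss, "storage-server")
    # ... later, if the notification is no longer wanted:
    broker.dontNotifyOnDisconnect(marker)

Note that a subscriber arriving after the disconnect is still notified, just
through eventually(), so the callback never runs synchronously inside
notifyOnDisconnect().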
Example 4
    def _refLost(self, wref):
        # don't do anything right now, we could be in the middle of all sorts
        # of weird code. both __del__ and weakref callbacks can fire at any
        # time. Almost as bad as threads..

        # instead, do stuff later.
        eventually(self._handleRefLost)
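The same defer-from-a-callback pattern as a self-contained sketch (the class
and method names are invented): the weakref callback only schedules work, and
the real cleanup runs later in a clean reactor turn.

    import weakref
    from foolscap.eventual import eventually

    class RefTracker:
        def track(self, obj):
            # weakref callbacks, like __del__, can fire at almost any point
            # in the program; do nothing here beyond scheduling
            self._wref = weakref.ref(obj, self._refLost)

        def _refLost(self, wref):
            eventually(self._handleRefLost)

        def _handleRefLost(self):
            # now safely outside the gc machinery
            print("tracked object was garbage-collected")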
Example 5
    def brokerAttached(self, tubref, broker, isClient):
        assert self.running
        if not tubref:
            # this is an inbound connection from an unauthenticated Tub
            assert not isClient
            # we just track it so we can disconnect it later
            self.unauthenticatedBrokers.append(broker)
            return

        if tubref in self.tubConnectors:
            # we initiated an outbound connection to this tubref
            if not isClient:
                # however, the connection we got was from an inbound
                # connection. The completed (inbound) connection wins, so
                # abandon the outbound TubConnector
                self.tubConnectors[tubref].shutdown()

            # we don't need the TubConnector any more
            del self.tubConnectors[tubref]

        if tubref in self.brokers:
            # this shouldn't happen: acceptDecision is supposed to drop any
            # existing old connection first.
            self.log("ERROR: unexpected duplicate connection from %s" % tubref)
            raise BananaError("unexpected duplicate connection")
        self.brokers[tubref] = broker

        # now inform everyone who's been waiting on it
        if tubref in self.waitingForBrokers:
            for d in self.waitingForBrokers[tubref]:
                eventual.eventually(d.callback, broker)
            del self.waitingForBrokers[tubref]
Example 6
    def incident_declared(self, triggering_event):
        self.trigger = triggering_event
        # choose a name for the logfile
        now = time.time()
        unique = os.urandom(4)
        unique_s = base32.encode(unique)
        self.name = "incident-%s-%s" % (self.format_time(now), unique_s)
        filename = self.name + ".flog"
        self.abs_filename = os.path.join(self.basedir, filename)
        self.abs_filename_bz2 = self.abs_filename + ".bz2"
        self.abs_filename_bz2_tmp = self.abs_filename + ".bz2.tmp"
        # open logfile. We use both an uncompressed one and a compressed one.
        self.f1 = open(self.abs_filename, "wb")
        self.f2 = bz2.BZ2File(self.abs_filename_bz2_tmp, "wb")

        # write header with triggering_event
        self.f1.write(flogfile.MAGIC)
        self.f2.write(flogfile.MAGIC)
        flogfile.serialize_header(self.f1,
                                  "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())
        flogfile.serialize_header(self.f2,
                                  "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())

        if self.TRAILING_DELAY is not None:
            # subscribe to events that occur after this one
            self.still_recording = True
            self.remaining_events = self.TRAILING_EVENT_LIMIT
            self.logger.addObserver(self.trailing_event)

        # use self.logger.buffers, copy events into logfile
        events = list(self.logger.get_buffered_events())
        events.sort(key=O.itemgetter('num'))

        for e in events:
            flogfile.serialize_wrapper(self.f1,
                                       e,
                                       from_=self.tubid_s,
                                       rx_time=now)
            flogfile.serialize_wrapper(self.f2,
                                       e,
                                       from_=self.tubid_s,
                                       rx_time=now)

        self.f1.flush()
        # the BZ2File has no flush method

        if self.TRAILING_DELAY is None:
            self.active = False
            eventually(self.finished_recording)
        else:
            # now we wait for the trailing events to arrive
            self.timer = reactor.callLater(self.TRAILING_DELAY,
                                           self.stop_recording)
Example 7
 def remote_subscribe_to_incidents(self,
                                   observer,
                                   catch_up=False,
                                   since=""):
     s = IncidentSubscription(observer, self._logger, self)
     eventually(s.subscribe, catch_up, since)
     # allow the call to return before we send them any events
     return s
Example 8
    def fire(self, result):
        assert not self._fired
        self._fired = True
        self._result = result

        for w in self._watchers:
            eventual.eventually(w.callback, result)
        del self._watchers
        self.__repr__ = self._fired_repr
Example 9
 def _deliver_queued_messages(self):
     for (methname, args, kwargs, resolver) in self._pendingMethods:
         eventually(self._deliver, methname, args, kwargs, resolver)
     del self._pendingMethods
     # Q: what are the partial-ordering semantics between queued messages
     # and when() clauses that are waiting on this Promise to be resolved?
     for d in self._watchers:
         eventually(d.callback, self._target)
     del self._watchers
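The eventual-send queue is FIFO (calls fire in the order they were queued),
which answers part of the question above: the pending deliveries are queued
before the watcher callbacks, so they also run before them. A minimal
demonstration, assuming a running reactor:

    from foolscap.eventual import eventually, flushEventualQueue

    order = []
    eventually(order.append, "delivery")
    eventually(order.append, "watcher")
    d = flushEventualQueue()
    d.addCallback(lambda _: print(order))  # prints ['delivery', 'watcher']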
Example 10
 def startService(self):
     service.MultiService.startService(self)
     for d, sturdy in self._pending_getReferences:
         d1 = eventual.fireEventually(sturdy)
         d1.addCallback(self.getReference)
         d1.addBoth(lambda res, d=d: d.callback(res))
     del self._pending_getReferences
     for rc in self.reconnectors:
         eventual.eventually(rc.startConnecting, self)
Example 11
 def send(self, event):
     if len(self.queue) < self.MAX_QUEUE_SIZE:
         self.queue.append(event)
     else:
         # preserve old messages, discard new ones.
         #self.messages_dropped += 1
         pass
     if not self.marked_for_sending:
         self.marked_for_sending = True
         eventually(self.start_sending)
Example 12
    def _notifyConnectionLostWatchers(self):
        """
        Call all functions waiting to learn about the loss of the connection of
        this broker.
        """
        watchers = self._connectionLostWatchers
        self._connectionLostWatchers = None

        for w in watchers:
            eventually(w)
Example 13
 def _send(self, methname, args, kwargs):
     """Return a Promise (for the result of the call) when the call is
     eventually made. The call is guaranteed to not fire in this turn."""
     # this is called by send()
     p, resolver = makePromise()
     if self._state in (EVENTUAL, CHAINED):
         self._pendingMethods.append((methname, args, kwargs, resolver))
     else:
         eventually(self._deliver, methname, args, kwargs, resolver)
     return p
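For context, a hedged sketch of how this method is reached through
foolscap.promise's public helpers: send(target).method(...) routes into
_send(), and when() turns the resulting Promise into a Deferred. The Adder
class is invented for illustration; a running reactor is assumed.

    from foolscap.promise import send, when

    class Adder:
        def add(self, a, b):
            return a + b

    p = send(Adder()).add(1, 2)   # queued; guaranteed not to run this turn
    d = when(p)                   # Deferred for the eventual result
    d.addCallback(print)          # prints 3 in a later reactor turn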
Example 14
    def testFlush(self):
        results = []
        eventually(results.append, 1)
        eventually(results.append, 2)
        d = flushEventualQueue()

        def _check(res):
            self.failUnlessEqual(results, [1, 2])

        d.addCallback(_check)
        return d
Example 15
    def stop_recording(self):
        self.still_recording = False
        self.active = False
        if self.timer and self.timer.active():
            self.timer.cancel()

        self.logger.removeObserver(self.trailing_event)
        # Observers are notified through an eventually() call, so we might
        # get a few more after the observer is removed. We use
        # self.still_recording to hush them.
        eventually(self.finished_recording)
Example 16
    def finished_recording(self):
        self.f2.close()
        move_into_place(self.abs_filename_bz2_tmp, self.abs_filename_bz2)
        # the compressed logfile has closed successfully. We no longer care
        # about the uncompressed one.
        self.f1.close()
        os.unlink(self.abs_filename)

        # now we can tell the world about our new incident report
        eventually(self.logger.incident_recorded,
                   self.abs_filename_bz2, self.name, self.trigger)
Example 17
    def incident_declared(self, triggering_event):
        self.trigger = triggering_event
        # choose a name for the logfile
        now = time.time()
        unique = os.urandom(4)
        unique_s = base32.encode(unique)
        self.name = "incident-%s-%s" % (self.format_time(now), unique_s)
        filename = self.name + ".flog"
        self.abs_filename = os.path.join(self.basedir, filename)
        self.abs_filename_bz2 = self.abs_filename + ".bz2"
        self.abs_filename_bz2_tmp = self.abs_filename + ".bz2.tmp"
        # open logfile. We use both an uncompressed one and a compressed one.
        self.f1 = open(self.abs_filename, "wb")
        self.f2 = bz2.BZ2File(self.abs_filename_bz2_tmp, "wb")

        # write header with triggering_event
        self.f1.write(flogfile.MAGIC)
        self.f2.write(flogfile.MAGIC)
        flogfile.serialize_header(self.f1, "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())
        flogfile.serialize_header(self.f2, "incident",
                                  trigger=triggering_event,
                                  versions=app_versions.versions,
                                  pid=os.getpid())

        if self.TRAILING_DELAY is not None:
            # subscribe to events that occur after this one
            self.still_recording = True
            self.remaining_events = self.TRAILING_EVENT_LIMIT
            self.logger.addObserver(self.trailing_event)

        # use self.logger.buffers, copy events into logfile
        events = list(self.logger.get_buffered_events())
        events.sort(key=lambda e: e['num'])
        for e in events:
            flogfile.serialize_wrapper(self.f1, e,
                                       from_=self.tubid_s, rx_time=now)
            flogfile.serialize_wrapper(self.f2, e,
                                       from_=self.tubid_s, rx_time=now)

        self.f1.flush()
        # the BZ2File has no flush method

        if self.TRAILING_DELAY is None:
            self.active = False
            eventually(self.finished_recording)
        else:
            # now we wait for the trailing events to arrive
            self.timer = reactor.callLater(self.TRAILING_DELAY,
                                           self.stop_recording)
Example 18
 def abandonAllRequests(self, why):
     for req in self.waitingForAnswers.values():
         if why.check(*LOST_CONNECTION_ERRORS):
             # map all connection-lost errors to DeadReferenceError, so
             # application code only needs to check for one exception type
             tubid = None
             # since we're creating a new exception object for each call,
             # let's add more information to it
             if self.remote_tubref:
                 tubid = self.remote_tubref.getShortTubID()
             e = DeadReferenceError("Connection was lost", tubid, req)
             why = failure.Failure(e)
         eventually(req.fail, why)
Example 19
 def _event_received(self, res):
     self.in_flight -= 1
     # the following would be nice to have, but requires very careful
     # analysis to avoid recursion, reentrancy, or even more overload
     #if self.messages_dropped and not self.queue:
     #    count = self.messages_dropped
     #    self.messages_dropped = 0
     #    log.msg(format="log-publisher: %(dropped)d messages dropped",
     #            dropped=count,
     #            facility="foolscap.log.publisher",
     #            level=log.UNUSUAL)
     if not self.marked_for_sending:
         self.marked_for_sending = True
         eventually(self.start_sending)
Example 20
    def incident_declared(self, triggering_event):
        self.trigger = triggering_event
        # choose a name for the logfile
        now = time.time()
        unique = os.urandom(4)
        unique_s = base32.encode(unique)
        self.name = "incident-%s-%s" % (self.format_time(now), unique_s)
        filename = self.name + ".flog"
        self.abs_filename = os.path.join(self.basedir, filename)
        self.abs_filename_bz2 = self.abs_filename + ".bz2"
        self.abs_filename_bz2_tmp = self.abs_filename + ".bz2.tmp"
        # open logfile. We use both an uncompressed one and a compressed one.
        self.f1 = open(self.abs_filename, "wb")
        self.f2 = bz2.BZ2File(self.abs_filename_bz2_tmp, "wb")

        # write header with triggering_event
        header = {"header": {"type": "incident",
                             "trigger": triggering_event,
                             "versions": app_versions.versions,
                             "pid": os.getpid(),
                             }}
        pickle.dump(header, self.f1)
        pickle.dump(header, self.f2)

        if self.TRAILING_DELAY is not None:
            # subscribe to events that occur after this one
            self.still_recording = True
            self.remaining_events = self.TRAILING_EVENT_LIMIT
            self.logger.addObserver(self.trailing_event)

        # use self.logger.buffers, copy events into logfile
        events = list(self.logger.get_buffered_events())
        events.sort(key=lambda e: e['num'])
        for e in events:
            wrapper = {"from": self.tubid_s,
                       "rx_time": now,
                       "d": e}
            pickle.dump(wrapper, self.f1)
            pickle.dump(wrapper, self.f2)

        self.f1.flush()
        # the BZ2File has no flush method

        if self.TRAILING_DELAY is None:
            self.active = False
            eventually(self.finished_recording)
        else:
            # now we wait for the trailing events to arrive
            self.timer = reactor.callLater(self.TRAILING_DELAY,
                                           self.stop_recording)
Example 21
 def test_queued_getref(self):
     t1 = Tub()
     d1 = t1.getReference(self.barry_url)
     d2 = t1.getReference(self.bill_url)
     def _check(res):
         ((barry_success, barry_rref),
          (bill_success, bill_rref)) = res
         self.assertTrue(barry_success)
         self.assertTrue(bill_success)
         self.assertTrue(isinstance(barry_rref, RemoteReference))
         self.assertTrue(isinstance(bill_rref, RemoteReference))
         self.assertFalse(barry_rref == bill_rref)
     dl = defer.DeferredList([d1, d2])
     dl.addCallback(_check)
     self.services.append(t1)
     eventually(t1.startService)
     return dl
Example 22
 def test_queued_getref(self):
     t1 = Tub()
     d1 = t1.getReference(self.barry_url)
     d2 = t1.getReference(self.bill_url)
     def _check(res):
         ((barry_success, barry_rref),
          (bill_success, bill_rref)) = res
         self.failUnless(barry_success)
         self.failUnless(bill_success)
         self.failUnless(isinstance(barry_rref, RemoteReference))
         self.failUnless(isinstance(bill_rref, RemoteReference))
         self.failIf(barry_rref == bill_rref)
     dl = defer.DeferredList([d1, d2])
     dl.addCallback(_check)
     self.services.append(t1)
     eventually(t1.startService)
     return dl
Example 23
 def test_queued_reconnector(self):
     t1 = Tub()
     bill_connections = []
     barry_connections = []
     t1.connectTo(self.bill_url, bill_connections.append)
     t1.connectTo(self.barry_url, barry_connections.append)
     def _check():
         return len(bill_connections) >= 1 and len(barry_connections) >= 1
     d = self.poll(_check)
     def _validate(res):
         self.assertTrue(isinstance(bill_connections[0], RemoteReference))
         self.assertTrue(isinstance(barry_connections[0], RemoteReference))
         self.assertFalse(bill_connections[0] == barry_connections[0])
     d.addCallback(_validate)
     self.services.append(t1)
     eventually(t1.startService)
     return d
Example 24
 def test_queued_reconnector(self):
     t1 = Tub()
     bill_connections = []
     barry_connections = []
     t1.connectTo(self.bill_url, bill_connections.append)
     t1.connectTo(self.barry_url, barry_connections.append)
     def _check():
         return len(bill_connections) >= 1 and len(barry_connections) >= 1
     d = self.poll(_check)
     def _validate(res):
         self.failUnless(isinstance(bill_connections[0], RemoteReference))
         self.failUnless(isinstance(barry_connections[0], RemoteReference))
         self.failIf(bill_connections[0] == barry_connections[0])
     d.addCallback(_validate)
     self.services.append(t1)
     eventually(t1.startService)
     return d
Example 25
    def add_event(self, facility, level, event):
        # send to observers
        for o in self._immediate_observers:
            o(event)
        for o in self._observers:
            eventual.eventually(o, event)

        # buffer locally
        d1 = self.buffers.get(facility)
        if not d1:
            d1 = self.buffers[facility] = {}
        buffer = d1.get(level)
        if not buffer:
            buffer = d1[level] = collections.deque()
        buffer.append(event)

        # enforce size limits on local buffers
        d2 = self.buffer_sizes.get(facility)
        if d2:
            sizelimit = d2.get(level, self.DEFAULT_SIZELIMIT)
        else:
            sizelimit = self.DEFAULT_SIZELIMIT
        while len(buffer) > sizelimit:
            buffer.popleft()

        # check with incident reporter. This is done synchronously rather
        # than via the usual eventual-send to allow the application to do:
        #  log.msg("abandon ship", level=log.BAD)
        #  sys.exit(1)
        #
        # This means the IncidentReporter will do most of its work right
        # here. The reporter is not allowed to make any foolscap calls, and
        # the call to incident_recorded() is required to pass through an
        # eventual-send.

        if self.active_incident_qualifier:
            try:
                # this might call declare_incident
                self.active_incident_qualifier.event(event)
            except:
                print(failure.Failure())  # for debugging
Example 26
    def testSend(self):
        results = []
        eventually(results.append, 1)
        self.failIf(results)

        def _check():
            self.failUnlessEqual(results, [1])

        eventually(_check)

        def _check2():
            self.failUnlessEqual(results, [1, 2])

        eventually(results.append, 2)
        eventually(_check2)
Example 27
    def testSend(self):
        results = []
        eventually(results.append, 1)
        self.assertFalse(results)

        def _check():
            self.assertEqual(results, [1])

        eventually(_check)

        def _check2():
            self.assertEqual(results, [1, 2])

        eventually(results.append, 2)
        eventually(_check2)
Example 28
 def finish(self, why):
     if self.disconnected:
         return
     assert isinstance(why, failure.Failure), why
     self.disconnected = True
     self.remote_broker = None
     self.abandonAllRequests(why)
     # TODO: why reset all the tables to something usable? There may be
     # outstanding RemoteReferences that point to us, but I don't see why
     # that requires all these empty dictionaries.
     self.myReferenceByPUID = {}
     self.myReferenceByCLID = {}
     self.yourReferenceByCLID = {}
     self.yourReferenceByURL = {}
     self.myGifts = {}
     self.myGiftsByGiftID = {}
     for (cb, args, kwargs) in self.disconnectWatchers:
         eventually(cb, *args, **kwargs)
     self.disconnectWatchers = []
     if self.tub:
         # TODO: remove the conditional. It is only here to accommodate
         # some tests: test_pb.TestCall.testDisconnect[123]
         self.tub.brokerDetached(self, why)
Example 29
 def notify(self, *args, **kwargs):
     for o in self._watchers:
         eventually(o, *args, **kwargs)
Example 30
 def loseConnection(self, _connDone=connectionDone):
     if not self.connected:
         return
     self.connected = False
     eventually(self.peer.connectionLost, _connDone)
     eventually(self.protocol.connectionLost, _connDone)
Example 31
 def write(self, bytes):
     eventually(self.peer.dataReceived, bytes)
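Examples 30 and 31 appear to come from an in-memory loopback transport used
in testing. A self-contained sketch of the idea (names invented): every write
is delivered through eventually(), so the receiving side always runs in a
later turn and a read can never re-enter the writer.

    from foolscap.eventual import eventually

    class LoopbackPipe:
        # one direction of an in-memory connection
        def __init__(self, peer):
            self.peer = peer  # any object with a dataReceived(data) method
            self.connected = True

        def write(self, data):
            eventually(self.peer.dataReceived, data)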
Example 32
 def reportViolation(self, why):
     self.violation = why
     eventually(self.d.callback, None)
     return None
Example 33
 def _ready(res):
     self._waiting_for_call_to_be_ready = False
     eventually(self.doNextCall)
     return res
Example 34
 def remote_subscribe_to_all(self, observer, catch_up=False):
     s = Subscription(observer, self._logger)
     eventually(s.subscribe, catch_up)
     # allow the call to return before we send them any events
     return s
Example 35
 def _fire(self, result):
     for w in self._watchers:
         eventually(w.callback, result)
     del self._watchers
     self.__repr__ = self._fired_repr
Example 36
 def scheduleCall(self, delivery, ready_deferred):
     self.inboundDeliveryQueue.append( (delivery,ready_deferred) )
     eventually(self.doNextCall)
Example 37
#! /usr/bin/env python

from foolscap.api import Tub
from foolscap.eventual import eventually
import sys
from twisted.internet import reactor

def go():
    t = Tub()
    t.startService()  # queued getReference calls only proceed once the Tub
                      # is running (see Example 10)
    d = t.getReference(sys.argv[1])
    d.addCallback(lambda rref: rref.callRemote("get_memory_usage"))
    def _got(res):
        print(res)
        reactor.stop()
    d.addCallback(_got)
    def _failed(f):
        print(f)
        reactor.stop()
    d.addErrback(_failed)

eventually(go)
reactor.run()