Example #1
    def on_frame(self, frame):
        """
        Called by event loop upon receiving an AMQP frame.

        This will verify all watches on given channel if they were hit,
        and take appropriate action.

        Unhandled frames will be logged - if they were sent, they probably were important.

        :param frame: AMQPFrame that was received
        """
        if self.log_frames is not None:
            self.log_frames.on_frame(monotonic(), frame, 'to_client')

        watch_handled = False  # True if ANY watch handled this

        # ==================== process per-channel watches
        #
        #   Note that new watches may arrive while we process existing watches.
        #   Therefore, we need to copy watches and zero the list before we proceed
        if frame.channel in self.watches:
            watches = self.watches[frame.channel]  # a list
            self.watches[frame.channel] = []

            alive_watches, f = alert_watches(watches, frame)
            watch_handled |= f

            if frame.channel in self.watches:
                # unwatch_all might have gotten called, check that
                for watch in alive_watches:
                    self.watches[frame.channel].append(watch)

        # ==================== process "any" watches
        any_watches = self.any_watches
        self.any_watches = []
        alive_watches, f = alert_watches(any_watches, frame)

        watch_handled |= f

        for watch in alive_watches:
            self.any_watches.append(watch)

        if not watch_handled:
            if isinstance(frame, AMQPMethodFrame):
                logger.warning('[%s] Unhandled method frame %s', self.name,
                               repr(frame.payload))
            else:
                logger.warning('[%s] Unhandled frame %s', self.name, frame)
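The helper alert_watches used above is not part of this snippet. A minimal sketch of what it could look like, assuming each watch exposes an is_triggered_by() predicate, a fire() callback and a oneshot flag (all three names are assumptions made for illustration, not the library's actual API):

def alert_watches(watches, frame):
    # Returns (watches that should stay registered, whether any watch fired).
    alive, any_fired = [], False
    for watch in watches:
        if watch.is_triggered_by(frame):    # hypothetical predicate
            watch.fire(frame)               # hypothetical callback
            any_fired = True
            if not watch.oneshot:           # keep multi-shot watches alive
                alive.append(watch)
        else:
            alive.append(watch)
    return alive, any_fired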
Example #2
    def send(self, frames, priority=False):
        """
        Schedule to send some frames.

        Take care: This won't stop you from sending frames larger tham frame_max.
        Broker will probably close the connection if he sees that.

        :param frames: list of frames or None to close the link
        :param reason: optional human-readable reason for this action
        """
        if self.log_frames is not None and frames is not None:
            for frame in frames:
                self.log_frames.on_frame(monotonic(), frame, 'to_server')

        if frames is not None:
            self.sendf.send(frames, priority=priority)
        else:
            # Listener socket will kill us when time is right
            self.listener_socket.send(None)
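A usage sketch, assuming conn is an object exposing the send() above and closing_frames has been built elsewhere (both names are placeholders):

def shut_down(conn, closing_frames):
    # urgent frames (e.g. a Connection.Close method) may jump ahead of queued ones
    conn.send(closing_frames, priority=True)
    # passing None asks the listener socket to tear the link down when it sees fit
    conn.send(None)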
Example #3
    def test_actually_waits(self):
        a = monotonic()

        self.c.drain(5)

        # drain(5) should block for roughly the requested 5 seconds; allow a second of slack
        self.assertGreaterEqual(monotonic() - a, 4)
Example #4
    def on_heartbeat(self, frame):
        self.last_heartbeat_on = monotonic()
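One way last_heartbeat_on could be consumed is a watchdog check like the one below; heartbeat_interval and fail() are hypothetical names used only for illustration:

    def check_heartbeat(self):
        # Treat the connection as dead if no heartbeat arrived for two intervals.
        if monotonic() - self.last_heartbeat_on > 2 * self.heartbeat_interval:
            self.fail()    # hypothetical: tear the connection down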
Example #5
    def do_timer_events(self):
        # Fire every timer event whose scheduled time has already passed.
        # self.time_events is a heap of (timestamp, fd, callback) tuples.
        mono = monotonic()
        while len(self.time_events) > 0 and (self.time_events[0][0] < mono):
            ts, fd, callback = heapq.heappop(self.time_events)
            callback()
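The unpacking above implies that self.time_events is a heap of (timestamp, fd, callback) entries. A companion scheduler could push entries like this (the method name and fd argument are assumptions):

    def schedule_timer_event(self, delay, fd, callback):
        # Store the absolute deadline so the earliest event sits at the heap root.
        heapq.heappush(self.time_events, (monotonic() + delay, fd, callback))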
Example #6
    def start(self,
              wait=True,
              timeout=10.0):  # type: (bool, float) -> None
        """
        Connect to broker. Initialize Cluster.

        Only after this call is Cluster usable.
        It is not safe to fork after this.

        :param wait: block until connection is ready
        :param timeout: timeout to wait until the connection is ready. If it is not, a
                        ConnectionDead error will be raised
        :raise RuntimeError: called more than once
        :raise ConnectionDead: failed to connect within timeout
        """
        if self.started:
            raise RuntimeError(u'[%s] This was already called!' %
                               (self.name, ))
        self.started = True

        self.listener = ListenerThread(name=self.name)

        self.attache_group = AttacheGroup()

        self.events = six.moves.queue.Queue()  # for coolamqp.clustering.events.*

        self.snr = SingleNodeReconnector(self.node, self.attache_group,
                                         self.listener, self.extra_properties,
                                         self.log_frames, self.name)
        self.snr.on_fail.add(lambda: self.events.put_nowait(ConnectionLost()))
        if self.on_fail is not None:
            self.snr.on_fail.add(self.on_fail)

        if self.on_blocked is not None:
            self.snr.on_blocked.add(self.on_blocked)

        # Spawn a transactional publisher and a noack publisher
        self.pub_tr = Publisher(Publisher.MODE_CNPUB, self)
        self.pub_na = Publisher(Publisher.MODE_NOACK, self)
        self.decl = Declarer(self)

        self.attache_group.add(self.pub_tr)
        self.attache_group.add(self.pub_na)
        self.attache_group.add(self.decl)

        self.listener.init()
        self.listener.start()
        self.snr.connect(timeout=timeout)

        if wait:
            # this is only going to take a short amount of time, so we're fine with polling
            start_at = monotonic()
            while not self.connected and monotonic() - start_at < timeout:
                time.sleep(0.1)
            if not self.connected:
                raise ConnectionDead(
                    '[%s] Could not connect within %s seconds' % (
                        self.name,
                        timeout,
                    ))
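Typical call sequence for this method, sketched with coolamqp-style NodeDefinition/Cluster objects; the connection parameters are placeholders and the exact constructor signature may differ between library versions:

from coolamqp.clustering import Cluster
from coolamqp.objects import NodeDefinition

node = NodeDefinition('127.0.0.1', 'guest', 'guest', heartbeat=30)
cluster = Cluster([node])
cluster.start(wait=True, timeout=10.0)   # raises ConnectionDead if not connected within timeout
# ... use the cluster here; it is not safe to fork the process from this point on ...
cluster.shutdown(wait=True)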