Example #1
def _lib_out(router):
    if globals()['shutdown_in_progress'] is False:  # We don't want to add more shutdown notices to the queue...
        globals()['shutdown_in_progress'] = True
        event.timeout(router.broadcast_shutdown(), event.abort)  # for each server, add one second onto the shutdown timeout..
Example #2
 def test_abort(self):
     print 'test_abort'
     def __time_cb():
         raise NotImplementedError, 'abort failed!'
     event.timeout(5, __time_cb)
     event.timeout(1, event.abort)
     event.dispatch()
Example #3
 def test_timeout2(self):
     def __timeout2_cb(start, secs):
         dur = int(time.time() - start)
         assert dur == secs, 'timeout2 failed'
     print 'test_timeout2'
     event.timeout(5, __timeout2_cb, time.time(), 5)
     event.dispatch()
Example #4
    def test_timeout2(self):
        def __timeout2_cb(start, secs):
            dur = int(time.time() - start)
            assert dur == secs, 'timeout2 failed'

        print 'test_timeout2'
        event.timeout(5, __timeout2_cb, time.time(), 5)
        event.dispatch()
Example #5
 def test_signal2(self):
     def __signal2_cb(sig):
         if sig:
             event.abort()
         else:
             os.kill(os.getpid(), signal.SIGUSR1)
     print 'test_signal2'
     event.signal(signal.SIGUSR1, __signal2_cb, signal.SIGUSR1)
     event.timeout(2, __signal2_cb)
Example #6
    def test_abort(self):
        print 'test_abort'

        def __time_cb():
            raise NotImplementedError, 'abort failed!'

        event.timeout(5, __time_cb)
        event.timeout(1, event.abort)
        event.dispatch()
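Taken together, these test snippets exercise a small core API: event.timeout(secs, callback, *args) schedules a one-shot timer, event.dispatch() runs the loop, and event.abort() stops it. A minimal self-contained sketch of that pattern, assuming the same pyevent-style event module the tests import:

import event

def _on_timer(msg):
    # fires roughly one second after dispatch() starts
    print msg

event.timeout(1, _on_timer, 'timer fired')
event.timeout(2, event.abort)  # stop the loop shortly afterwards
event.dispatch()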
Example #7
 def test_exception(self):
     print 'test_exception'
     def __bad_cb(foo):
         raise NotImplementedError, foo
     event.timeout(0, __bad_cb, 'bad callback')
     try:
         event.dispatch()
     except NotImplementedError:
         pass
Example #8
    def test_signal2(self):
        def __signal2_cb(sig):
            if sig:
                event.abort()
            else:
                os.kill(os.getpid(), signal.SIGUSR1)

        print 'test_signal2'
        event.signal(signal.SIGUSR1, __signal2_cb, signal.SIGUSR1)
        event.timeout(2, __signal2_cb)
Example #9
    def test_exception(self):
        print 'test_exception'

        def __bad_cb(foo):
            raise NotImplementedError, foo

        event.timeout(0, __bad_cb, 'bad callback')
        try:
            event.dispatch()
        except NotImplementedError:
            pass
Example #10
 def test_callback_exception(self):
     print 'test_callback_exception'
     def __raise_cb(exc):
         raise exc
     def __raise_catch_cb(exc):
         try:
             raise exc
         except:
             pass
     event.timeout(0, __raise_cb, StandardError())
     event.timeout(0, __raise_catch_cb, Exception())
     self.assertRaises(StandardError, event.dispatch)
Example #11
 def test_thread(self):
     print 'test_thread'
     def __time_cb(d):
         assert d['count'] == 3
     def __time_thread(count, d):
         for i in range(count):
             time.sleep(1)
             d['count'] += 1
     d = { 'count': 0 }
     thread.start_new_thread(__time_thread, (3, d))
     event.timeout(4, __time_cb, d)
     event.dispatch()
Example #12
    def test_thread(self):
        print 'test_thread'

        def __time_cb(d):
            assert d['count'] == 3

        def __time_thread(count, d):
            for i in range(count):
                time.sleep(1)
                d['count'] += 1

        d = {'count': 0}
        thread.start_new_thread(__time_thread, (3, d))
        event.timeout(4, __time_cb, d)
        event.dispatch()
Example #13
    def test_callback_exception(self):
        print 'test_callback_exception'

        def __raise_cb(exc):
            raise exc

        def __raise_catch_cb(exc):
            try:
                raise exc
            except:
                pass

        event.timeout(0, __raise_cb, StandardError())
        event.timeout(0, __raise_catch_cb, Exception())
        self.assertRaises(StandardError, event.dispatch)
Example #14
    def read(self, timeout=None):
        '''
        Read from the transport. If no data is available, should return None.
        The timeout is ignored as this returns only data that has already
        been buffered locally.
        '''
        # NOTE: copying over this comment from Connection, because there is
        # knowledge captured here, even if the details are stale
        # Because of the timer callback to dataRead when we re-buffered,
        # there's a chance that in between we've lost the socket. If that's
        # the case, just silently return as some code elsewhere would have
        # already notified us. That bug could be fixed by improving the
        # message reading so that we consume all possible messages and ensure
        # that only a partial message was rebuffered, so that we can rely on
        # the next read event to read the subsequent message.
        if not hasattr(self, '_sock'):
            return None

        # This is sort of a hack because we're faking that data is ready, but
        # it works for purposes of supporting timeouts
        if timeout:
            if self._heartbeat_timeout:
                self._heartbeat_timeout.delete()
            self._heartbeat_timeout = \
                event.timeout(timeout, self._sock_read_cb, self._sock)
        elif self._heartbeat_timeout:
            self._heartbeat_timeout.delete()
            self._heartbeat_timeout = None

        return self._sock.read()
Example #16
    def activate(self):
        """Enable receive via multicast.

        Joins multicast group and creates reactor.
        """
        if self.is_active:
            return
        self.mc_socket = UdpSocket()
        self.mc_socket.bind(self.dest_addr)
        self.mc_socket.multicast_interface = self.dest_if
        self.mc_socket.join_mcast_group(self.dest_addr[0])
        self.mc_reactor = RmcReactor(self.mc_socket, self)
        self.mc_reactor.add_read()
        self._hb_timer = event.timeout(0.1, self._hb_timer_tick)
        self._chk_timer = event.timeout(0.01, self._chk_timer_tick)
        self.is_active = True
        return
Example #17
 def _flag_activity(self):
   """
   Flag that this socket is active.
   """
   # is there a better way of resetting a timer?
   if self._inactive_event:
     self._inactive_event.delete()
     self._inactive_event = event.timeout( self._inactive_timeout, self._protected_cb, self._inactive_cb )
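The comment above asks whether there is a better way of resetting a timer; with this style of API the usual answer is exactly what the snippet does: delete the pending timeout and schedule a fresh one (the reset_heartbeat example that follows uses the same approach). A small illustrative sketch of that reset-on-activity pattern, with hypothetical names not taken from the original project:

import event

class IdleWatcher(object):
    def __init__(self, timeout, on_idle):
        self._timeout = timeout
        self._on_idle = on_idle
        self._ev = event.timeout(timeout, on_idle)

    def touch(self):
        # "reset" the timer by dropping the pending event and re-arming it
        if self._ev:
            self._ev.delete()
        self._ev = event.timeout(self._timeout, self._on_idle)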
Example #18
 def reset_heartbeat(self):
     """Reset heartbeat time to lowest value.
     """
     if not self._hb_timer:
         return
     self._hb_timer.delete()
     t_next = self._next_hb_time(True)
     self._hb_timer = event.timeout(t_next, self._hb_timer_tick)
     return t_next
Example #19
 def _set_read_cb(self, cb):
   """
   Set the read callback.  If there's data in the read buffer, immediately
   set up a call.
   """
   self._parent_read_cb = cb
   #if self._read_buf.tell()>0 and self._parent_read_cb!=None and self._pending_read_cb_event==None:
   if len(self._read_buf)>0 and self._parent_read_cb!=None and self._pending_read_cb_event==None:
     self._pending_read_cb_event = \
       event.timeout( 0, self._protected_cb, self._parent_read_timer_cb )
Example #20
    def _recv_close(self, method_frame):
        self.connection._close_info = {
            'reply_code': method_frame.args.read_short(),
            'reply_text': method_frame.args.read_shortstr(),
            'class_id': method_frame.args.read_short(),
            'method_id': method_frame.args.read_short()
        }

        self._send_close_ok()

        # Clear the socket close callback because we should be expecting it.  The fact
        # that it is called in practice means that we flush the data, rabbit processes
        # and then closes the socket before the timer below fires.  I don't know what
        # this means, but it is surprising. - AW
        if self.connection._sock != None:
            self.connection._sock.close_cb = None

        # Schedule the actual close for later so that handshake IO can take place.
        # Even though it's scheduled at 0, it's queued after the frame IO
        event.timeout(0, self.connection._close_socket)

        # Likewise, call any potential close callback on a delay
        event.timeout(0, self.connection._callback_close)
Example #21
  def connect(self, delay=0):
    '''Connect.'''
    # Ensure that the connection has cleaned up old resources.  Do it immediately
    # to be sure that output is buffered and no errors are raised.
    try:
      self._connection.logger.debug("disconnecting connection")
      self._connection.disconnect()
    except:
      self._connection.logger.exception( "error while disconnecting" )

    self._connection.logger.debug("Pending connect: %s", self._pending_connect)
    if not self._pending_connect:
      self._connection.logger.debug("Scheduling a connection in %s", delay)
      self._pending_connect = event.timeout(delay, self._connect_cb)
Example #22
  def _recv_close(self, method_frame):
    self.connection._close_info = {
      'reply_code'    : method_frame.args.read_short(),
      'reply_text'    : method_frame.args.read_shortstr(),
      'class_id'      : method_frame.args.read_short(),
      'method_id'     : method_frame.args.read_short()
    }

    self._send_close_ok()

    # Clear the socket close callback because we should be expecting it.  The fact
    # that it is called in practice means that we flush the data, rabbit processes
    # and then closes the socket before the timer below fires.  I don't know what
    # this means, but it is surprising. - AW
    if self.connection._sock != None:
      self.connection._sock.close_cb = None

    # Schedule the actual close for later so that handshake IO can take place.
    # Even though it's scheduled at 0, it's queued after the frame IO
    event.timeout(0, self.connection._close_socket)

    # Likewise, call any potential close callback on a delay
    event.timeout(0, self.connection._callback_close)
Example #23
    def connect(self, delay=0):
        '''Connect.'''
        # Ensure that the connection has cleaned up old resources.  Do it immediately
        # to be sure that output is buffered and no errors are raised.
        try:
            self._connection.logger.debug("disconnecting connection")
            self._connection.disconnect()
        except:
            self._connection.logger.exception("error while disconnecting")

        self._connection.logger.debug("Pending connect: %s",
                                      self._pending_connect)
        if not self._pending_connect:
            self._connection.logger.debug("Scheduling a connection in %s",
                                          delay)
            self._pending_connect = event.timeout(delay, self._connect_cb)
Example #24
    def set_write_timeout(self, tmo):
        """Set the timeout for write events to tmo seconds.

        tmo is the timeout in seconds as a float. If it is <= 0, the
        timeout is canceled.

        On timeout the on_write_timeout method is called.
        """
        if self._write_timeout:
            self._write_timeout.delete()
            self._write_timeout = None
        if not tmo or tmo <= 0.0:
            # a timeout of 0 or less means cancel the timeout
            self._wr_tmo = 0.0
            return
        self._write_timeout = event.timeout(tmo, self.on_write_timeout)
        self._wr_tmo = tmo
        return
Example #25
 def set_inactive_timeout(self, t):
   """
   Set the inactivity timeout.  If t is None or 0, there is no inactivity timeout.
   If t > 0, the socket will automatically close if there has been no activity
   for t seconds (float supported).  Raises TypeError if t is invalid.
   """
   if t==None or t==0:
     if self._inactive_event:
       self._inactive_event.delete()
       self._inactive_event = None
     self._inactive_timeout = 0
   elif isinstance(t,(int,long,float)):
     if self._inactive_event:
       self._inactive_event.delete()
     self._inactive_event = event.timeout( t, self._inactive_cb )
     self._inactive_timeout = t
   else:
     raise TypeError( "invalid timeout %s"%(str(t)) )
Example #26
  def _connect_cb(self, timeout_at, *args, **kwargs):
    '''
    Local support for synchronous and asynchronous connect. Required because
    `event.timeout` doesn't support kwargs. They are spec'd here, though, so that
    we can branch on how exceptions are handled.
    '''
    err = self._sock.connect_ex( *args )

    if not err:
      self._peername = "%s:%d"%self._sock.getpeername()
      self._read_event = event.read( self._sock, self._protected_cb, self._read_cb )
      self._write_event = event.write( self._sock, self._protected_cb, self._write_cb )
      
      if self._connect_event:
        self._connect_event.delete()
        self._connect_event = None

    elif err in (errno.EINPROGRESS,errno.EALREADY):
      # Only track timeout if we're about to re-schedule. Should only receive
      # these on a non-blocking socket.
      if isinstance(timeout_at,float) and time.time()>timeout_at:
        self._error_msg = 'timeout connecting to %s'%str(args)
        self.close()
        return
      
      if self._connect_event:
        self._connect_event.delete()

      # Checking every 100ms seems to be a reasonable frequency. If
      # requested, this too can be made configurable.
      self._connect_event = event.timeout(0.1, self._connect_cb, 
        timeout_at, *args)
    else:
      if self._connect_event:
        self._connect_event.delete()

      self._error_msg = os.strerror(err)
      serr = socket.error( err, self._error_msg )

      if kwargs.get('immediate_raise'):
        raise serr
      else:
        self._handle_error( serr )
Example #27
  def _read_cb(self):
    """
    Read callback from libevent.
    """
    # We should be able to use recv_into for speed and efficiency, but sadly
    # this was broken after 2.6.1 http://bugs.python.org/issue7827
    self._error_msg = "error reading from socket"
    data = self._sock.recv( self.getsockopt(socket.SOL_SOCKET,socket.SO_RCVBUF) )
    if len(data)>0:
      if self._debug:
        self._logger.debug( "read %d bytes from %s"%(len(data), self._peername) )
      # 23 Feb 09 aaron - There are cases where the client will have started
      # pushing data right away, and there's a chance that async handling of
      # accept will cause data to be read before the callback function has been
      # set.  I prefer to ignore data if no read callback defined, but it's
      # better to just limit the overall size of the input buffer than to use
      # a synchronous callback to __parent_accept_cb.
      # TODO: So what is the best way of handling this problem, and if sticking
      # with a max input buffer size, what's the correct algorithm?  Maybe a better
      # approach is to raise a notice to a callback and let the callback decide
      # what to do.
      self._flag_activity()
      self._read_buf.extend( data )

      if self._max_read_buffer and len(self._read_buf) > self._max_read_buffer:
        if self._debug:
          self._logger.debug( "buffer for %s overflowed!"%(self._peername) )

        # Clear the input buffer so that the callback flush code isn't called in close
        self._read_buf = bytearray()
        self.close()
        return None
  
      # Callback asynchronously so that priority is given to libevent to
      # allocate time slices.
      if self._parent_read_cb!=None and self._pending_read_cb_event==None:
        self._pending_read_cb_event = \
          event.timeout( 0, self._protected_cb, self._parent_read_timer_cb )

    else:
      self.close()
      return None
    return True
Example #28
def main(local_ip, mca):
    my_sock = UdpSocket((local_ip, 0))
    my_sock.send_buffer_size = 100000
    mc_sock = UdpSocket(mca)
    mc_sock.join_mcast_group(mca[0])
    mc_sock.receive_buffer_size = 10000
    mc_sock.send_buffer_size = 100000
    sender = Sender()
    handler = RmcProtocolHandler(my_sock, mc_sock)
    handler.MAX_BACKLOG = 5000
    handler.MAX_BURST = 5
    handler.no_sync = True
##     handler.add_action('__received_packet__', sender.on_incomming_packet)
##     handler.add_action('__sent_packet__', sender.on_outgoing_packet)
    handler.add_action('missing_heartbeat', sender.on_missing_sender)
    handler.add_action('new_sender', sender.on_new_sender)
    handler.add_action('new_packet', sender.on_new_packet)
    handler.add_action('got_heartbeat', sender.on_heartbeat)
    handler.add_action('got_nack', sender.on_nack)
    handler.add_action('got_reset', sender.on_reset)
    handler.add_action('got_lost', sender.on_lost)
    handler.add_action('sent_heartbeat', sender.sent_heartbeat)
    handler.add_action('sent_lost', sender.sent_lost)
    handler.add_action('sent_nack', sender.sent_nack)
    print 'SENDER STARTED', repr(handler.local_addr)
    # spool first batch of packets
    for n in range(3):
        p = packet.Packet()
        p.payload = 's: %d' % n
        handler.send(p)
    # delayed reset
    p = packet.Packet(seq=200)
    p.flags |= packet.RESET
    p.payload = ''
    event.timeout(2.0, send_p, [p], handler)
    # more packets, fast!
    pl = [ packet.Packet(payload=('%06d: ' % n + 'Z' * 80)) for n in xrange(50) ]
    event.timeout(3.0, send_p, pl, handler)
    # more packets, fast!
    pl = [ packet.Packet(payload=('%06d: ' % n + 'Z' * 80)) for n in xrange(50, 550) ]
    event.timeout(4.0, send_p, pl, handler)
    try:
        event.dispatch()
    except KeyboardInterrupt:
        pass
    handler.finish()
    now = time.time()
    print '%.6f: DONE' % now
    return
Example #29
def _call_again(t, f, *args):
    f(*args)
    event.timeout(t, _call_again, t, f, *args).add()
Example #30
def call_every(t, f, *args):
    event.timeout(t, _call_again, t, f, *args).add()
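Examples #29 and #30 turn one-shot timeouts into a repeating timer: _call_again runs the callback and then re-schedules itself. A hedged usage sketch that copies those two helpers and drives them under event.dispatch(), assuming the event module behaves as in the projects above:

import event

def _call_again(t, f, *args):
    f(*args)
    event.timeout(t, _call_again, t, f, *args).add()

def call_every(t, f, *args):
    event.timeout(t, _call_again, t, f, *args).add()

def heartbeat(name):
    # invoked roughly every second until the loop is aborted
    print 'tick from', name

call_every(1.0, heartbeat, 'demo')
event.timeout(5.5, event.abort)  # let a few ticks through, then stop
event.dispatch()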
Example #31
    def __init__(self, uc_socket, mc_socket=None):
        """Setup rmc handler.

        uc_socket:    UdpSocket bound to local unicast address
        mc_socket:    UdpSocket bound to multicast address, optional

        Attributes:
          mc_socket:    UdpSocket bound to multicast address
          uc_socket:    UdpSocket bound to local unicast address
          mc_reactor:   RmcReactor for multicast address (receive only)
          uc_reactor:   RmcReactor for unicast address (receive/send)
          dest_addr:    ip/port of multicast channel
          local_addr:   ip/port for unicast
          seq:          current sequence no. for outgoing packets
          no_sync:      if True don't send NACKs on first heartbeat for a sender
          silent :      if True don't send data, never ever, just listen
        """
        # create multicast socket
##         self.mc_socket = UdpSocket((mc_addr[0], mc_addr[1]))
##         self.mc_socket.multicast_interface = mc_interface
##         self.mc_socket.join_mcast_group(mc_addr[0])
        self.mc_socket = mc_socket
        self.uc_socket = uc_socket
        # create reactors for both sockets
        # and attach them to this protocol handler
        self.uc_reactor = RmcReactor(self.uc_socket, self)
        self.local_addr = self.uc_socket.getsockname()
        # if multicast socket is None
        # sending will use the unicast socket as fallback
        if mc_socket is None:
            self.mc_reactor = self.uc_reactor
##             self.mc_socket.multicast_interface = mc_interface
        else:
            self.mc_reactor = RmcReactor(mc_socket, self)
            self.mc_socket.multicast_loop = True
        # get/assign addresses
        self.dest_addr = self.mc_socket.getsockname()
        # send_buffer is a list of packets to send out
        # send_prio_buffer is for outgoing packets w/ high priority
        self._send_buffer = []
        self._send_prio_buffer = []
        # sequence counter for outgoing data packets
        self.seq = 0
        # remember last data packet sent
        self._last_sent = None
        # keep track of peers
        self._peers = {}
        # our outgoing backlog
        self._backlog = []
        # activate reactors
        self.mc_reactor.add_read()
        self.uc_reactor.add_read()
        # initialize generator for heartbeat times
        self._hb_gen = self._hb_generator()
        self._hb_gen.next()
        # mapping to keep track of event subscriptions
        self._ev_handler = {}
        # flag if write event is active
        self._write_active = False
        # flag when set, no sync on first heartbeat of a sender is done
        self.no_sync = False
        # flag when set, no data will be sent (ever!)
        self._silent = False
        # prepare continuous timer ticks
        self._hb_timer = event.timeout(0.1, self._hb_timer_tick)
        self._chk_timer = event.timeout(0.01, self._chk_timer_tick)
        # flag showing if this handler does receive data via multicast
        self.is_active = True
        return
Example #32
 def _hb_timer_tick(self):
     self._hb_timer.delete()
     t_next = self.on_heartbeat()
     self._hb_timer = event.timeout(t_next, self._hb_timer_tick)
     return
Example #33
 def schedule_call_global(self, seconds, cb, *args, **kwargs):
     event_timeout = event.timeout(seconds, lambda : cb(*args, **kwargs) and None)
     return event_wrapper(event_timeout)
Example #34
 def _chk_timer_tick(self):
     self._chk_timer.delete()
     self._check_sender_timeouts()
     self._chk_timer = event.timeout(0.01, self._chk_timer_tick)
     return
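Examples #32 and #34 both use the same self-rescheduling tick: the callback deletes the fired event, does its work, and arms a new timeout. A generic, hypothetical distillation of that pattern (Ticker is illustrative, not from the original code):

import event

class Ticker(object):
    def __init__(self, interval, fn):
        self._interval = interval
        self._fn = fn
        self._timer = event.timeout(interval, self._tick)

    def _tick(self):
        self._timer.delete()  # drop the fired event
        self._fn()            # do the periodic work
        self._timer = event.timeout(self._interval, self._tick)  # re-arm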
Example #35
def _lib_out(router):
    if globals()['shutdown_in_progress'] is False:  # We don't want to add more shutdown notices to the queue...
        globals()['shutdown_in_progress'] = True
        event.timeout(router.broadcast_shutdown(), event.abort)  # for each server, add one second onto the shutdown timeout..
Example #36
 def __init__(self, t, f, *args):
     tm = event.timeout(t, f, *args)
     tm.add()
     self._approx_fire = time.time() + t
     self._ev_timer = tm
Example #38
 def schedule_call_local(self, seconds, cb, *args, **kwargs):
     timer = LocalTimer(cb, args, kwargs)
     event_timeout = event.timeout(seconds, timer)
     timer.impl = event_timeout
     return timer
Example #39
 def schedule_call_global(self, seconds, cb, *args, **kwargs):
     event_timeout = event.timeout(seconds,
                                   lambda: cb(*args, **kwargs) and None)
     return event_wrapper(event_timeout)
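One detail worth noting in the schedule_call_global helpers: the lambda wraps cb so that the timer always sees a return value of None. In this family of event libraries a callback that returns a true value is re-added (compare the return True at the end of the _read_cb example above, which keeps that read event registered), so the 'and None' keeps the scheduled call one-shot no matter what cb returns. A minimal hedged sketch of the same trick, assuming that return-value convention holds for the event module in use:

import event

def noisy_job():
    print 'job ran'
    return True  # a truthy return on its own could re-arm the timer under this convention

# wrapping the call so the timer sees None keeps it strictly one-shot
event.timeout(1, lambda: noisy_job() and None)
event.timeout(3, event.abort)
event.dispatch()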