    def __init__(self, proxy, thread_number):
        self.pool = eventlet.GreenPool(thread_number)
        self.input_queue = eventlet.Queue()
        self.output_queue = eventlet.Queue()
        self.proxy = proxy
        self.logger = Logger.create("log")
        self.timeout = 10
Example #2
    def __init__(self,
                 path,
                 disk_id,
                 compress_transfer=None,
                 compressor_count=3):
        self._offset = None
        self._session = None
        self._ip = None
        self._port = None
        self._crt = None
        self._key = None
        self._ca = None
        self._closing = False
        self._write_error = False
        self._id = None
        self._exception = None
        self._compressor_count = compressor_count
        self._comp_q = eventlet.Queue(maxsize=5)
        self._sender_q = eventlet.Queue(maxsize=5)

        self._sender_evt = None
        self._compressor_evt = None

        self._compress_transfer = compress_transfer
        if self._compress_transfer is None:
            self._compress_transfer = CONF.compress_transfers
        super(HTTPBackupWriterImpl, self).__init__(path, disk_id)
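The writer above appears to stage data through two small bounded queues (maxsize=5) sitting between the reader, the compressor greenthreads, and the sender greenthread. A minimal sketch of that backpressure idea, with invented names and a sentinel that exists only in this sketch (not the class's actual API):

import eventlet


def producer(q):
    # Blocks on put() whenever the queue already holds 5 unconsumed chunks.
    for chunk in range(20):
        q.put(chunk)
    q.put(None)  # sentinel: no more data (sketch only)


def consumer(q, out):
    while True:
        chunk = q.get()
        if chunk is None:
            break
        out.append(chunk)


q = eventlet.Queue(maxsize=5)
results = []
getter = eventlet.spawn(consumer, q, results)
putter = eventlet.spawn(producer, q)
putter.wait()
getter.wait()
assert results == list(range(20))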
Example #3
    def __init__(self, create, destroy, size=3):
        if size == 0:
            raise RuntimeError("Zero size would create unbounded resources")
        self.ready = eventlet.Queue(maxsize=size)
        self.trash = eventlet.Queue()
        self.size = size

        self.create = create
        self.destroy = destroy
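The pool above appears to hand resources out through the bounded ready queue. A hypothetical usage sketch (the dict resources and worker function are invented, not the original class's interface): borrowers block on get() when everything is checked out and give resources back with put().

import eventlet

size = 3
ready = eventlet.Queue(maxsize=size)
for n in range(size):
    ready.put({'conn': n})  # stand-in for whatever create() builds


def use_resource(i, out):
    res = ready.get()        # borrow; blocks if all resources are checked out
    eventlet.sleep(0)        # pretend to do work with the resource
    out.append((i, res['conn']))
    ready.put(res)           # hand it back to the pool


done = []
workers = [eventlet.spawn(use_resource, i, done) for i in range(10)]
for gt in workers:
    gt.wait()
assert len(done) == 10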
Example #4
    def __init__(self, handler, peers, routerid):
        self.logger = logging.getLogger('fbgp.exabgp_connect')
        self.handler = handler
        self.peers = peers
        self.routerid = routerid
        self.conn = None
        self.exabgp = None
        self.running = False
        self.recv_queue = eventlet.Queue(256)
        self.send_queue = eventlet.Queue(256)
Example #5
    def __init__(self):
        self.config = {}
        self.config['modules'] = {}
        self.config['services'] = {}

        # Dictionary mapping service name to a dict of arguments.
        # Those arguments are only passed to d6service by createservice if they
        #   are not already present in the ARGS argument given to createservice.
        self.default_service_args = {}

        cageKeys = ['python.d6cage']
        cageDesc = 'deepsix python cage'
        name = "d6cage"

        deepsix.deepSix.__init__(self, name, cageKeys)

        self.inbox = eventlet.Queue()
        self.dataPath = eventlet.Queue()

        self.table = amqprouter.routeTable()
        self.table.add("local.router", self.inbox)
        self.table.add(self.name, self.inbox)
        self.table.add("router", self.inbox)

        localname = "local." + self.name
        self.table.add(localname, self.inbox)

        self.modules = {}
        self.services = {}

        self.greenThreadPool = eventlet.GreenPool()
        self.greenThreads = []

        self.unloadingServices = {}
        self.reloadingServices = set()

        self.services[self.name] = {}
        self.services[self.name]['service'] = self
        self.services[self.name]['name'] = self.name
        self.services[self.name]['description'] = cageDesc
        self.services[self.name]['inbox'] = self.inbox
        self.services[self.name]['keys'] = self.keys
        self.services[self.name]['type'] = None
        self.services[self.name]['id'] = None

        self.subscribe(
            "local.d6cage",
            "routeKeys",
            callback=self.updateRoutes,
            interval=5)
Example #6
        def handle_client_subscribe(data):
            """Handle client subscribing to a channel."""
            channel = data['channel']

            # make a queue to receive events from pubsub
            q = eventlet.Queue(maxsize=20)

            # save request context for eventlet ctx switch
            req_ctx_stack = _request_ctx_stack
            req_ctx = req_ctx_stack.top.copy()

            def emit_green(q, req_ctx):
                """Wait for events on the queue and emit them to client."""
                while 1:
                    n = q.get()  # block on waiting for item from queue
                    req_ctx.push()  # restore request context
                    emit(
                        'event', n
                    )  # send event to client (has channel and payload fields)
                    req_ctx.pop()  # done with request context

            # subscribe and queue emit callbacks, async
            subscription = self.pubsub.subscribe(channel, q)
            listen_gthread = eventlet.spawn(emit_green, q, req_ctx)
            self.listen_gthreads[request.sid].append(listen_gthread)
            self.subscriptions[request.sid].append(subscription)
            log.info(f"Client {current_user} subscribed to {channel}")
            emit('subscribed', {'channel': channel})
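Stripped of the Flask-SocketIO and request-context details, the mechanism above is a background greenthread that drains a bounded queue and hands each item to a callback. A self-contained sketch of that pattern, using a plain list in place of emit() and a sentinel value that exists only in this sketch:

import eventlet


def forward(q, callback):
    # Drain the queue forever, handing each event to the callback.
    while True:
        item = q.get()       # blocks until the publisher puts something
        if item is None:     # sentinel used only in this sketch
            break
        callback(item)


events = eventlet.Queue(maxsize=20)
delivered = []
listener = eventlet.spawn(forward, events, delivered.append)

events.put({'channel': 'news', 'payload': 'hello'})
events.put(None)
listener.wait()
assert delivered == [{'channel': 'news', 'payload': 'hello'}]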
Example #7
    def __init__(self, handler):
        self.proto = None
        self.running = False
        self.handler = handler
        self.server_addr = os.environ.get('FBGP_SERVER_ADDR') or 'localhost'
        self.server_port = int(os.environ.get('FBGP_SERVER_PORT') or 9999)
        self.send_q = eventlet.Queue(128)
Example #8
def test_greenthread_singleton(metaclass: Type, repetitions: int):
    assert singletons.detect_greenthread_environment() == 'eventlet'

    class MySingleton(metaclass=metaclass):
        def __init__(self):
            self.uuid = uuid.uuid4()

    def inner_func(q: eventlet.Queue):
        a = MySingleton()
        b = MySingleton()
        q.put((
            a.uuid,
            b.uuid,
        ))

    test_q = eventlet.Queue()
    greenthreads = []
    for _ in range(repetitions):
        p = eventlet.spawn_n(inner_func, test_q)
        greenthreads.append(p)
        eventlet.sleep()  # force execution context to switch

    seen_uuids = set()
    while len(seen_uuids) < repetitions:
        a, b = test_q.get(timeout=JOIN_TIMEOUT)
        assert a == b
        assert a not in seen_uuids
        seen_uuids.add(a)
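The test fans work out with spawn_n, which returns no handle to join, so the only way back is the shared queue; get(timeout=...) keeps a lost greenthread from hanging the run. A stripped-down sketch of that collect-with-timeout pattern (the worker function and values are invented here):

import eventlet
from eventlet.queue import Empty


def work(n, q):
    q.put(n * n)


results_q = eventlet.Queue()
for n in range(5):
    eventlet.spawn_n(work, n, results_q)
    eventlet.sleep()  # yield so the spawned greenthread gets to run

collected = set()
try:
    while len(collected) < 5:
        collected.add(results_q.get(timeout=1))
except Empty:
    raise AssertionError('a worker never reported back')
assert collected == {0, 1, 4, 9, 16}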
Example #9
        def socket_created(tp):
            print "New socket type %s" % tp

            # Create a new mock socket.
            socket = mock.Mock()

            # Hook its bind and connect methods, so we can remember the address
            # that it binds or connects to.
            socket.bind.side_effect = make_socket_bound(socket)
            socket.connect.side_effect = make_socket_connect(socket)

            # Create a queue that we can use to deliver messages to be received
            # on this socket.
            socket.rcv_queue = eventlet.Queue(1)

            # Hook the socket's recv_multipart and poll methods, to wait on
            # this queue.
            socket.recv_multipart.side_effect = make_recv('multipart', socket)
            socket.recv_json.side_effect = make_recv('json', socket)
            socket.poll.side_effect = make_poll(socket)

            # Add this to the test code's list of known sockets.
            self.sockets.add(socket)

            return socket
Example #10
    def initialize(self, conf, context):
        LOG.info("API Spout is started")
        self.MAX_WORKERS = FLAGS.get('rabbit_read_workers')
        self.pid = os.getpid()
        self.worker_pool = eventlet.GreenPool(self.MAX_WORKERS)
        self.rpc = rpc.RemoteProcedureCall()
        self.queue = eventlet.Queue()
Example #11
    def benchmark_datasource_update(self, size):
        """Benchmark a datasource update.

        Time the propagation of a datasource update from datasource.poll() to
        ending up in the datasource.dataPath queue.
        """

        LOG.info("%s:: benchmarking datasource update of %d rows", size)
        self.datasource.datarows = size

        # intercept the queue addition so it doesn't immediately get pulled off
        # by the d6cage
        received = eventlet.Queue()
        self.mox.StubOutWithMock(self.datasource.dataPath, "put_nowait")
        self.datasource.dataPath.put_nowait(mox.IgnoreArg()).WithSideEffects(
            received.put_nowait)
        self.mox.ReplayAll()

        # poll and then wait until we've got an item from our queue
        LOG.info("%s:: polling datasource", self.__class__.__name__)
        self.datasource.poll()
        result = received.get(timeout=30)
        self.assertTrue(result.body)
        self.assertEqual(len(result.body.data), size)
        self.mox.VerifyAll()
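The interesting trick above is stubbing dataPath.put_nowait so its side effect forwards the update into a queue the test owns, which the test can then block on with a timeout. The original uses mox; a rough equivalent with unittest.mock and a stand-in datasource (both are assumptions of this sketch, not the real classes) looks like:

import eventlet
from unittest import mock


class FakeDatasource(object):
    """Stand-in for the real datasource; poll() publishes one update."""

    def __init__(self):
        self.dataPath = eventlet.Queue()

    def poll(self):
        self.dataPath.put_nowait({'rows': 3})


ds = FakeDatasource()
received = eventlet.Queue()
# Redirect put_nowait so the test sees the update instead of the usual consumer.
with mock.patch.object(ds.dataPath, 'put_nowait',
                       side_effect=received.put_nowait):
    ds.poll()
    update = received.get(timeout=5)
assert update == {'rows': 3}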
Example #12
    def bind(self):
        """ Binds the netlink socket and initializes data structures used for
        communication between the dispatcher and the server.
        """
        if self.socket is not None:
            try:
                self.socket.close()
            except socket.error:
                # Ignore the error as we will try to rebind.
                pass
        self.__intfqueue = collections.defaultdict(eventlet.Queue)
        self.__intfevent = eventlet.Queue()
        try:
            # pylint: disable=no-member
            self.socket = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW,
                                        self.NETLINK_ROUTE)
            # Set rcv. buffer size to 30M (higher than the rmem_max of 8M).
            self.socket.setsockopt(socket.SOL_SOCKET, self.__SO_RCVBUFFORCE,
                                   self.__BUF_SIZE)
        except socket.error as ex:
            raise RuntimeError('open: socket err: %s' % ex)
        # Open a socket for receiving netlink msgs.
        try:
            # PID_MAX_LIMIT is 2^22 allowing 1024 sockets per-pid. We
            # start with 1 in the upper space (top 10 bits) instead of
            # 0 to avoid conflicts with netlink_autobind which always
            # attempts to bind with the pid (and on failure with
            # negative values -4097, -4098, -4099 etc.)
            self.socket.bind((os.getpid() | (1 << 22), Netlink.GROUPS))
        except socket.error as ex:
            raise RuntimeError('bind: socket err: %s' % ex)
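collections.defaultdict(eventlet.Queue) gives the dispatcher one lazily created queue per interface, so messages can be routed by key without registering anything up front. A tiny sketch of that lookup-creates-the-queue behaviour (the keys here are made up):

import collections

import eventlet

# Looking up a missing key creates its queue on the spot.
per_intf = collections.defaultdict(eventlet.Queue)

per_intf['eth0'].put('link up')
per_intf['eth1'].put('link down')

assert per_intf['eth0'].get() == 'link up'
assert per_intf['eth1'].get() == 'link down'
assert set(per_intf) == {'eth0', 'eth1'}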
Example #13
    def __init__(self,
                 members,
                 name,
                 server_address='tcp://*:5556',
                 pubsub_address='tcp://*:5557'):
        """RPC server that runs on the master node.

        Note: Currently, the server is not threaded. A high volume of
                traffic is not expected, so it's probably not necessary.

        Arguments:
        members: [list] the namespace/name of running servers in the cluster
        name: [string] the namespace/name of the node this is running on
        server_address: [string] the web address the main server runs on
        pubsub_address: [string] the web address the pub_sub runs on
        """

        self.server = RPCServer(server_address)
        self.publisher = Publisher(pubsub_address)
        self.message_queue = eventlet.Queue()
        self.running = Switch()
        self.running.on()
        self.name = name
        self.phase = 0
        self.num_repoted = 0
        self.busy = False

        # TODO(egarcia): Make this thread safe
        self.members = {}
        for member in members:
            self.members[member] = {'phase': 0, 'data': {}}

        # Start the server
        self.__listen()
Example #14
    def test_channel_wait(self):
        channel = eventlet.Queue(0)
        events = []

        def another_greenlet():
            events.append('sending hello')
            channel.put('hello')
            events.append('sending world')
            channel.put('world')
            events.append('sent world')

        eventlet.spawn(another_greenlet)

        events.append('waiting')
        events.append(channel.get())
        events.append(channel.get())

        self.assertEqual(
            ['waiting', 'sending hello', 'hello', 'sending world', 'world'],
            events)
        eventlet.sleep(0)
        self.assertEqual([
            'waiting', 'sending hello', 'hello', 'sending world', 'world',
            'sent world'
        ], events)
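A queue constructed with size 0 behaves like a channel: put() does not complete until the item has been handed to a getter, which is why 'sent world' only shows up after the extra eventlet.sleep(0). A smaller sketch of the same rendezvous, independent of the test above:

import eventlet

chan = eventlet.Queue(0)  # zero capacity: every put() is a hand-off
log = []


def sender():
    log.append('put start')
    chan.put('x')          # blocks until the main greenthread takes the item
    log.append('put done')


eventlet.spawn(sender)
eventlet.sleep(0)          # let sender() run up to its put()
assert log == ['put start']
assert chan.get() == 'x'   # rendezvous: takes the waiting item
eventlet.sleep(0)          # let sender() resume after the hand-off
assert log == ['put start', 'put done']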
Example #15
        def simulated_time_sleep(secs=None):
            if secs is None:
                # Thread just wants to yield to any other waiting thread.
                self.give_way()
                return
            # Create a new queue.
            queue = eventlet.Queue(1)
            queue.stack = inspect.stack()[1][3]

            # Add it to the dict of sleepers, together with the waking up time.
            self.sleepers[queue] = self.current_time + secs

            _log.info("T=%s: %s: Start sleep for %ss until T=%s",
                      self.current_time,
                      queue.stack,
                      secs,
                      self.sleepers[queue])

            # Do a zero time real sleep, to allow other threads to run.
            self.real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)

            # Block until something is posted to the queue.
            queue.get(True)

            # Wake up.
            return None
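Each simulated sleeper parks itself on its own one-slot queue, so the test's clock controller can wake a specific sleeper just by putting a token onto that queue. A reduced sketch of the wake-up half (the names and the single sleeper are invented for illustration):

import eventlet

sleepers = {}


def fake_sleep(name):
    # Each sleeper parks on its own queue until the controller wakes it.
    q = eventlet.Queue(1)
    sleepers[name] = q
    q.get(True)
    return '%s woke up' % name


gt = eventlet.spawn(fake_sleep, 'worker-1')
eventlet.sleep(0)                # let fake_sleep() register itself and block
sleepers['worker-1'].put(None)   # the controller decides it is time
assert gt.wait() == 'worker-1 woke up'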
Example #16
    def __init__(self, client, router, connection, message_handler):
        """ A Session between a Client and a Router.

        :Parameters:
            client : instance
                An instance of :class:`peers.Client`
            router : instance
                An instance of :class:`peers.Router`
            connection : instance
                An instance of ``wampy.transports``.
                Defaults to ``wampy.transports.WebSocket``
            message_handler : instance
                An instance of ``wampy.message_handler.MessageHandler``,
                or a subclass of it.

        """
        self.client = client
        self.router = router
        self.connection = connection
        self.message_handler = message_handler

        self.request_ids = {}
        self.subscription_map = {}
        self.registration_map = {}

        self.session_id = None
        # spawn a green thread to listen for incoming messages over
        # a connection and put them on a queue to be processed
        self._managed_thread = None
        self._message_queue = eventlet.Queue()
        self._listen(self.connection, self._message_queue)
Example #17
    def __init__(self):
        self.deleteQueue = eventlet.Queue()
        self.deleteStopped = False
        self.rebuildQueue = eventlet.Queue()
        self.rebuildStopped = False

        # Dictionary of weak references to Locks that govern file locking
        # of ThinAppFile objects. The mapping is fileId -> Lock. If no entry
        # is present, the file is assumed to be unlocked.
        self.fileLocks = weakref.WeakValueDictionary()

        # It's locks all the way down.
        self.lockMutex = threading.Lock()

        # By default this lets 1000 requests run so we may want to limit
        # it to a lower number.
        self.pool = eventlet.GreenPool()
Example #18
    def test_waiters_that_cancel(self):
        q = eventlet.Queue()

        gt = eventlet.spawn(do_bail, q)
        self.assertEqual(gt.wait(), 'timed out')

        q.put('hi')
        self.assertEqual(q.get(), 'hi')
Example #19
    def test_gt_link_callback_added_during_execution(self):
        pool = greenpool.GreenPool()
        q1 = eventlet.Queue()
        q2 = eventlet.Queue()

        def func():
            q1.put(None)
            q2.get()

        link_callback = mock.Mock()

        thread = pool.spawn(func)
        q1.get()
        thread.link(link_callback)
        q2.put(None)
        pool.waitall()
        link_callback.assert_called_once_with(thread)
Example #20
def handle_blocks(websocket):
    """Receive a connection and send it database notifications.
    """
    queue = eventlet.Queue()
    eventlet.spawn(pg_listen, queue, 'blocks')
    while 1:
        item = queue.get()
        websocket.send(item.payload)
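pg_listen is presumably a greenthread that listens for Postgres notifications and puts each one on the queue, so the websocket handler only ever blocks on get(). Without a database or a websocket, the shape of the handler can be sketched with a fake producer and a list standing in for the client (everything here is illustrative):

import eventlet


def fake_pg_listen(queue, channel):
    # Stand-in for pg_listen: pretend three notifications arrive, then stop.
    for height in (1, 2, 3):
        queue.put({'channel': channel, 'payload': 'block %d' % height})
    queue.put(None)  # sentinel used only in this sketch


def handle_fake_client(sent):
    queue = eventlet.Queue()
    eventlet.spawn(fake_pg_listen, queue, 'blocks')
    while True:
        item = queue.get()
        if item is None:
            break
        sent.append(item['payload'])  # the real handler calls websocket.send()


sent = []
handle_fake_client(sent)
assert sent == ['block 1', 'block 2', 'block 3']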
Example #21
    def test_gt_link_callback_exception_inside_thread(self):
        pool = greenpool.GreenPool()
        q1 = eventlet.Queue()
        q2 = eventlet.Queue()

        def func():
            q1.put(None)
            q2.get()
            raise Exception()

        link_callback = mock.Mock()

        thread = pool.spawn(func)
        q1.get()
        thread.link(link_callback)
        q2.put(None)
        pool.waitall()
        link_callback.assert_called_once_with(thread)
Example #22
    def test_two_bogus_waiters(self):
        q = eventlet.Queue()
        gt1 = eventlet.spawn(do_bail, q)
        gt2 = eventlet.spawn(do_bail, q)
        eventlet.sleep(0)
        q.put('sent')
        self.assertEqual(gt1.wait(), 'timed out')
        self.assertEqual(gt2.wait(), 'timed out')
        self.assertEqual(q.get(), 'sent')
Example #23
    def __init__(self, *args, **kwargs):
        super(FlowBasedBGP, self).__init__(*args, **kwargs)
        self.logger = get_logger('fbgp', os.environ.get('FBGP_LOG', None),
                                 os.environ.get('FBGP_LOG_LEVEL', 'info'))
        self.faucet_api = kwargs['faucet_experimental_api']
        self.nexthop_to_pathid = {}
        self.path_mapping = collections.defaultdict(set)
        self.vip_assignment = {}
        self.rcv_msg_q = eventlet.Queue(256)
Example #24
    def test_send_last(self):
        q = eventlet.Queue()
        def waiter(q):
            self.assertEqual(q.get(), 'hi2')

        gt = eventlet.spawn(eventlet.with_timeout, 0.1, waiter, q)
        eventlet.sleep(0)
        eventlet.sleep(0)
        q.put('hi2')
        gt.wait()
Example #25
    def test_resize_down(self):
        q = eventlet.Queue(5)

        for i in range(5):
            q.put(i)

        self.assertEqual(list(q.queue), list(range(5)))
        q.resize(1)
        eventlet.sleep(0)
        self.assertEqual(list(q.queue), list(range(5)))
Example #26
    def __init__(self, path, disk_id, compress_transfer=None, encoder_count=3):
        self._msg_id = None
        self._stdin = None
        self._stdout = None
        self._stderr = None
        self._offset = None
        self._ssh = None
        self._sender_q = eventlet.Queue(maxsize=5)
        self._enc_q = eventlet.Queue(maxsize=5)
        self._sender_evt = None
        self._encoder_evt = []
        self._encoder_cnt = encoder_count
        self._exception = None
        self._closing = False

        self._compress_transfer = compress_transfer
        if self._compress_transfer is None:
            self._compress_transfer = CONF.compress_transfers
        super(SSHBackupWriterImpl, self).__init__(path, disk_id)
Example #27
    def test_waiting(self):
        q = eventlet.Queue()
        gt1 = eventlet.spawn(q.get)
        eventlet.sleep(0)
        self.assertEqual(1, q.getting())
        q.put('hi')
        eventlet.sleep(0)
        self.assertEqual(0, q.getting())
        self.assertEqual('hi', gt1.wait())
        self.assertEqual(0, q.getting())
Example #28
        def multiple_greenthread_contexts(self):
            csm = EventletContextStackManager()

            def make_func(name):
                def func(csm, queue):
                    csm.push_coroutine(name)
                    queue.put(list(csm.iter_current_stack()))

                func.__name__ = name
                return func

            foo_queue = eventlet.Queue()
            bar_queue = eventlet.Queue()
            foo = eventlet.spawn(make_func('foo'), csm, foo_queue)
            bar = eventlet.spawn(make_func('bar'), csm, bar_queue)
            foo.wait()
            bar.wait()
            Assert(foo_queue.get()) == ['foo']
            Assert(bar_queue.get()) == ['bar']
Example #29
def handle(ws):
    """
    Receive a connection and send it database notifications.
    """
    q = eventlet.Queue()
    eventlet.spawn(dblisten, q)
    while 1:
        n = q.get()
        print(n)
        ws.send(n.payload)
Example #30
    def test_task_done(self):
        channel = eventlet.Queue(0)
        X = object()
        gt = eventlet.spawn(channel.put, X)
        result = channel.get()
        assert result is X, (result, X)
        assert channel.unfinished_tasks == 1, channel.unfinished_tasks
        channel.task_done()
        assert channel.unfinished_tasks == 0, channel.unfinished_tasks
        gt.wait()
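eventlet's Queue also exposes join(), which works with the unfinished-task accounting used above: a producer can block until every queued item has been acknowledged with task_done(). A short companion sketch (the worker and the sentinel are invented for illustration):

import eventlet

q = eventlet.Queue()


def worker(done):
    while True:
        item = q.get()
        if item is None:
            q.task_done()        # the sentinel counts as a queued task too
            break
        done.append(item * 2)
        q.task_done()


done = []
eventlet.spawn(worker, done)
for n in range(3):
    q.put(n)
q.put(None)
q.join()                         # returns once every put() has been task_done()'d
assert done == [0, 2, 4]
assert q.unfinished_tasks == 0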