def test_exceptionpreservation(self):
    # events for controlling execution order
    gt1event = Event()
    gt2event = Event()

    def test_gt1():
        try:
            raise KeyError()
        except KeyError:
            gt1event.send('exception')
            gt2event.wait()
            assert sys.exc_info()[0] is KeyError
            gt1event.send('test passed')

    def test_gt2():
        gt1event.wait()
        gt1event.reset()
        assert sys.exc_info()[0] is None
        try:
            raise ValueError()
        except ValueError:
            gt2event.send('exception')
            gt1event.wait()
            assert sys.exc_info()[0] is ValueError

    g1 = eventlet.spawn(test_gt1)
    g2 = eventlet.spawn(test_gt2)
    try:
        g1.wait()
        g2.wait()
    finally:
        g1.kill()
        g2.kill()
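# A minimal sketch (not from any of the projects in this collection) of the
# primitive the test above exercises: eventlet.event.Event lets one
# greenthread block in wait() until another calls send(); reset() re-arms
# the event so it can be sent again.
import eventlet
from eventlet.event import Event

def demo_event_handoff():
    ev = Event()
    eventlet.spawn(ev.send, 'ready')  # producer signals from its own greenthread
    assert ev.wait() == 'ready'       # consumer blocks until send() happens
    ev.reset()                        # re-arm the event for reuse
    eventlet.spawn(ev.send, 'again')
    assert ev.wait() == 'again'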
def test_greenthread_raise_in_kill(container_factory, rabbit_config, logger):

    class Service(object):
        name = "service"

        @rpc
        def echo(self, arg):
            return arg  # pragma: no cover

    container = container_factory(Service, rabbit_config)
    queue_consumer = get_extension(container, QueueConsumer)
    rpc_consumer = get_extension(container, RpcConsumer)

    # an error in rpc_consumer.handle_message will kill the queue_consumer's
    # greenthread. when the container suicides and kills the queue_consumer,
    # it should warn instead of re-raising the original exception
    exc = Exception("error handling message")
    with patch.object(rpc_consumer, "handle_message") as handle_message:
        handle_message.side_effect = exc

        container.start()

        with ServiceRpcProxy("service", rabbit_config) as service_rpc:
            # spawn because `echo` will never respond
            eventlet.spawn(service_rpc.echo, "foo")

    # container will have died with the message handling error
    with pytest.raises(Exception) as exc_info:
        container.wait()
    assert str(exc_info.value) == "error handling message"

    # queue consumer will have warned about the exc raised by its greenthread
    assert logger.warn.call_args_list == [
        call("QueueConsumer %s raised `%s` during kill", queue_consumer, exc)
    ]
def wait_many(timeout=1):

    #: Create connection
    #: If hostname, userid, password and virtual_host are not specified
    #: the values below are the default, but listed here so they can
    #: be easily changed.
    connection = BrokerConnection(hostname="localhost",
                                  userid="guest",
                                  password="******",
                                  virtual_host="/")

    #: SimpleQueue mimics the interface of the Python Queue module.
    #: First argument can either be a queue name or a kombu.Queue object.
    #: If a name, then the queue will be declared with the name as the queue
    #: name, exchange name and routing key.
    queue = connection.SimpleQueue("kombu_demo")

    while True:
        try:
            message = queue.get(block=False, timeout=timeout)
        except Empty:
            break
        else:
            spawn(message.ack)
            print(message.payload)
def test_interthread_external_lock(self):
    call_list = []

    @lockutils.synchronized("foo", external=True, lock_path=self.lock_dir)
    def foo(param):
        """Simulate a long-running threaded operation."""
        call_list.append(param)
        # NOTE(bnemec): This is racy, but I don't want to add any
        # synchronization primitives that might mask a problem
        # with the one we're trying to test here.
        time.sleep(0.5)
        call_list.append(param)

    def other(param):
        foo(param)

    thread = eventlet.spawn(other, "other")
    # Make sure the other thread grabs the lock
    start = time.time()
    while not os.path.exists(os.path.join(self.lock_dir, "foo")):
        if time.time() - start > 5:
            self.fail("Timed out waiting for thread to grab lock")
        time.sleep(0)
    thread1 = eventlet.spawn(other, "main")
    thread1.wait()
    thread.wait()
    self.assertEqual(call_list, ["other", "other", "main", "main"])
def test_service_disconnect_with_active_async_worker(
        container_factory, rabbit_manager, rabbit_config):
    """ Break the connection between a service's queue consumer and rabbit
    while the service has an active async worker (e.g. event handler).
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    # get the service's queue consumer connection while we know it's the
    # only active connection
    vhost = rabbit_config['vhost']
    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert len(connections) == 1
    queue_consumer_conn = connections[0]['name']

    # disconnect the service's queue consumer while it's running the worker
    eventlet.spawn(disconnect_on_event, rabbit_manager, queue_consumer_conn)

    # dispatch an event
    data = uuid.uuid4().hex
    dispatch = event_dispatcher(rabbit_config)
    dispatch('srcservice', 'exampleevent', data)

    # `handle` will have been called twice with the same `data`, because
    # rabbit will have redelivered the un-ack'd message from the first call
    def event_handled_twice():
        assert handle_called.call_args_list == [call(data), call(data)]
    assert_stops_raising(event_handled_twice)

    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert queue_consumer_conn not in [
        conn['name'] for conn in connections]
def test_recv_during_send(self):
    sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
    sleep()

    done = event.Event()

    try:
        SNDHWM = zmq.SNDHWM
    except AttributeError:
        # ZeroMQ < 3.0
        SNDHWM = zmq.HWM

    sender.setsockopt(SNDHWM, 10)
    sender.setsockopt(zmq.SNDBUF, 10)
    receiver.setsockopt(zmq.RCVBUF, 10)

    def tx():
        tx_i = 0
        while tx_i <= 1000:
            sender.send(str(tx_i))
            tx_i += 1
        done.send(0)

    spawn(tx)
    final_i = done.wait()
    self.assertEqual(final_i, 0)
def test_nested_acquire(self):
    q = zmq._QueueLock()
    self.assertFalse(q)
    q.acquire()
    q.acquire()

    s = semaphore.Semaphore(0)
    results = []

    def lock(x):
        with q:
            results.append(x)
        s.release()

    spawn(lock, 1)
    sleep()
    self.assertEqual(results, [])
    q.release()
    sleep()
    self.assertEqual(results, [])
    self.assertTrue(q)
    q.release()

    s.acquire()
    self.assertEqual(results, [1])
def test_waiters_get_woken(self):
    # verify that when there's someone waiting on an empty pool
    # and someone puts an immediately-closed connection back in
    # the pool that the waiter gets woken
    self.pool.put(self.connection)
    self.pool.clear()
    self.pool = self.create_pool(max_size=1, max_age=0)

    self.connection = self.pool.get()
    self.assertEqual(self.pool.free(), 0)
    self.assertEqual(self.pool.waiting(), 0)
    e = event.Event()

    def retrieve(pool, ev):
        c = pool.get()
        ev.send(c)

    eventlet.spawn(retrieve, self.pool, e)
    eventlet.sleep(0)  # these two sleeps should advance the retrieve
    eventlet.sleep(0)  # coroutine until it's waiting in get()
    self.assertEqual(self.pool.free(), 0)
    self.assertEqual(self.pool.waiting(), 1)
    self.pool.put(self.connection)
    timer = eventlet.Timeout(1)
    conn = e.wait()
    timer.cancel()
    self.assertEqual(self.pool.free(), 0)
    self.assertEqual(self.pool.waiting(), 0)
    self.pool.put(conn)
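# A hypothetical helper distilling the deadline guard used above:
# eventlet.Timeout raises in the current greenthread if the block takes too
# long, and as a context manager it is cancelled automatically on exit, so
# it cannot fire later on the success path.
import eventlet

def wait_with_deadline(ev, seconds=1.0):
    with eventlet.Timeout(seconds):
        return ev.wait()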
def test_stop_during_kill(container, logger):
    """ Verify we handle the race condition when a runner tries to stop
    a container while it is being killed.
    """
    with patch.object(
            container, '_kill_active_threads', autospec=True) as kill_threads:

        # force eventlet yield during kill() so stop() will be scheduled
        kill_threads.side_effect = eventlet.sleep

        # manufacture an exc_info to kill with (captured inside the except
        # block, since sys.exc_info() is cleared after it on Python 3)
        try:
            raise Exception('error')
        except Exception:
            exc_info = sys.exc_info()

        eventlet.spawn(container.kill, exc_info)
        eventlet.spawn(container.stop)

        with pytest.raises(Exception):
            container.wait()
        assert logger.debug.call_args_list == [
            call("already being killed %s", container),
        ]
def test_published_concurrency(self):
    """Test concurrent access to the local queue of the rpc publisher."""

    def faux_cast_go(context, topic, msg):
        self.published.append((topic, msg))

    def faux_cast_wait(context, topic, msg):
        self.stubs.Set(oslo_rpc, 'cast', faux_cast_go)
        # Sleep to simulate concurrency and allow other threads to work
        eventlet.sleep(0)
        self.published.append((topic, msg))

    self.stubs.Set(oslo_rpc, 'cast', faux_cast_wait)

    publisher = rpc.RPCPublisher(network_utils.urlsplit('rpc://'))
    job1 = eventlet.spawn(publisher.publish_samples, None, self.test_data)
    job2 = eventlet.spawn(publisher.publish_samples, None, self.test_data)

    job1.wait()
    job2.wait()

    self.assertEqual(publisher.policy, 'default')
    self.assertEqual(len(self.published), 2)
    self.assertEqual(len(publisher.local_queue), 0)
def test_two_simultaneous_connections(self):
    # timing-sensitive test, disabled until we come up with a better
    # way to do this
    self.pool = self.create_pool(max_size=2)
    conn = self.pool.get()
    self.set_up_dummy_table(conn)
    self.fill_up_table(conn)
    curs = conn.cursor()
    conn2 = self.pool.get()
    self.set_up_dummy_table(conn2)
    self.fill_up_table(conn2)
    curs2 = conn2.cursor()
    results = []
    LONG_QUERY = "select * from test_table"
    SHORT_QUERY = "select * from test_table where row_id <= 20"

    evt = event.Event()

    def long_running_query():
        self.assert_cursor_works(curs)
        curs.execute(LONG_QUERY)
        results.append(1)
        evt.send()

    evt2 = event.Event()

    def short_running_query():
        self.assert_cursor_works(curs2)
        curs2.execute(SHORT_QUERY)
        results.append(2)
        evt2.send()

    eventlet.spawn(long_running_query)
    eventlet.spawn(short_running_query)
    evt.wait()
    evt2.wait()
    results.sort()
    self.assertEqual([1, 2], results)
def test_published_concurrency(self):
    """Test concurrent access to the local queue of the rpc publisher."""
    publisher = self.publisher_cls(
        netutils.urlsplit('%s://' % self.protocol))

    with mock.patch.object(publisher, '_send') as fake_send:
        def fake_send_wait(ctxt, topic, meters):
            fake_send.side_effect = mock.Mock()
            # Sleep to simulate concurrency and allow other threads to work
            eventlet.sleep(0)

        fake_send.side_effect = fake_send_wait

        job1 = eventlet.spawn(getattr(publisher, self.pub_func),
                              mock.MagicMock(), self.test_data)
        job2 = eventlet.spawn(getattr(publisher, self.pub_func),
                              mock.MagicMock(), self.test_data)

        job1.wait()
        job2.wait()

    self.assertEqual('default', publisher.policy)
    self.assertEqual(2, len(fake_send.mock_calls))
    self.assertEqual(0, len(publisher.local_queue))
def test_spin(self):
    "Tests the Spin action"
    # Set the balancer up to return a Spin
    balancer = MockBalancer()
    action = Spin(balancer, "aeracode.org", "aeracode.org",
                  timeout=2, check_interval=1)
    balancer.fixed_action = action
    # Ensure it times out
    sock = MockSocket()
    try:
        with Timeout(2.2):
            start = time.time()
            action.handle(sock, "", "/", {})
            duration = time.time() - start
    except Timeout:
        self.fail("Spin lasted for too long")
    self.assert_(duration >= 1, "Spin did not last for long enough")
    self.assertEqual(
        open(os.path.join(os.path.dirname(__file__),
                          "..", "static", "timeout.http")).read(),
        sock.data,
    )
    # Now, ensure it picks up a change
    sock = MockSocket()
    try:
        with Timeout(2):
            def host_changer():
                eventlet.sleep(0.7)
                balancer.fixed_action = Empty(
                    balancer, "aeracode.org", "aeracode.org", code=402)
            eventlet.spawn(host_changer)
            action.handle(sock, "", "/", {})
    except Timeout:
        self.fail("Spin lasted for too long")
    self.assertEqual(
        "HTTP/1.0 402 Payment Required\r\nConnection: close\r\n"
        "Content-length: 0\r\n\r\n",
        sock.data,
    )
def run(self):
    for (iface, port), app in self._wsgi_apps.items():
        print "Orbited listening on http://%s:%s" % (iface or "0.0.0.0", port)
        eventlet.spawn(eventlet.wsgi.server,
                       eventlet.listen((iface, port)),
                       app, log=EmptyLogShim())
    ev = eventlet.event.Event()
    eventlet.spawn(self._run, ev)
    return ev
def test_queue(session):
    import recipes
    q = recipes.ZKQueue(session, "/myqueue", [ZOO_OPEN_ACL_UNSAFE])
    q.enqueue("Zoo")
    q.enqueue("Keeper")

    def dequeue_thread():
        while True:
            value = q.dequeue()
            print "from dequeue", value
            if value == "EOF":
                return

    def enqueue_thread():
        for i in range(10):
            q.enqueue("value%i" % (i,))
            eventlet.sleep(1)
        q.enqueue("EOF")

    dt = eventlet.spawn(dequeue_thread)
    et = eventlet.spawn(enqueue_thread)
    et.wait()
    dt.wait()
def test_index_request(ctx):
    from melkman.db.remotefeed import RemoteFeed
    from melkman.fetch import request_feed_index
    from melkman.fetch.worker import run_feed_indexer
    from eventlet import sleep, spawn

    # start a feed indexer
    indexer = spawn(run_feed_indexer, ctx)

    # start a web server...
    www = os.path.join(data_path(), 'www')
    ts = FileServer(www)
    ts_proc = spawn(ts.run)

    try:
        test_url = ts.url_for('good.xml')
        assert RemoteFeed.get_by_url(test_url, ctx) is None

        # make an index request...
        request_feed_index(test_url, ctx)
        sleep(.5)

        rf = RemoteFeed.get_by_url(test_url, ctx)
        assert rf is not None
        assert len(rf.entries.keys()) == 2
    finally:
        indexer.kill()
        indexer.wait()
        ts_proc.kill()
        ts_proc.wait()
def test_published_concurrency(self):
    """Test concurrent access to the local queue of the rpc publisher."""

    def faux_cast_go(context, topic, msg):
        self.published.append((topic, msg))

    def faux_cast_wait(context, topic, msg):
        self.useFixture(fixtures.MonkeyPatch(
            "ceilometer.openstack.common.rpc.cast", faux_cast_go))
        # Sleep to simulate concurrency and allow other threads to work
        eventlet.sleep(0)
        self.published.append((topic, msg))

    self.useFixture(fixtures.MonkeyPatch(
        "ceilometer.openstack.common.rpc.cast", faux_cast_wait))

    publisher = rpc.RPCPublisher(network_utils.urlsplit("rpc://"))
    job1 = eventlet.spawn(publisher.publish_samples, None, self.test_data)
    job2 = eventlet.spawn(publisher.publish_samples, None, self.test_data)

    job1.wait()
    job2.wait()

    self.assertEqual(publisher.policy, "default")
    self.assertEqual(len(self.published), 2)
    self.assertEqual(len(publisher.local_queue), 0)
def _recv_loop(self):
    buf = b''
    while True:
        try:
            data = self._socket.recv(512)
        except greenlet.GreenletExit:
            raise
        except Exception:
            # the connection is broken: reconnect in a fresh greenthread
            # and leave this loop, so `data` is never used while unbound
            eventlet.spawn(self.reconnect)
            return
        buf += data
        pos = buf.find(b"\n")
        while pos >= 0:
            line = unquote_plus(force_str(buf[0:pos]))
            parts = line.split(' ')
            if len(parts) >= 3:
                parts = line.split(' ', 2)
                parts.pop(0)
                self._recv_queue.put(parts)
            else:
                # ignore malformed lines
                pass
            buf = buf[pos + 1:]
            pos = buf.find(b"\n")
def test_parallel_builds(self):
    stubs.stubout_loopingcall_delay(self.stubs)

    def _do_build(id, proj, user, *args):
        values = {
            'id': id,
            'project_id': proj,
            'user_id': user,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}
        network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
                         {'broadcast': '192.168.0.255',
                          'dns': ['192.168.0.1'],
                          'gateway': '192.168.0.1',
                          'gateway6': 'dead:beef::1',
                          'ip6s': [{'enabled': '1',
                                    'ip': 'dead:beef::dcad:beff:feef:0',
                                    'netmask': '64'}],
                          'ips': [{'enabled': '1',
                                   'ip': '192.168.0.100',
                                   'netmask': '255.255.255.0'}],
                          'label': 'fake',
                          'mac': 'DE:AD:BE:EF:00:00',
                          'rxtx_cap': 3})]
        instance = db.instance_create(self.context, values)
        self.conn.spawn(instance, network_info)

    gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id)
    gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id)
    gt1.wait()
    gt2.wait()
def test_waiting(self):
    pool = greenpool.GreenPool(1)
    done = event.Event()

    def consume():
        done.wait()

    def waiter(pool):
        gt = pool.spawn(consume)
        gt.wait()

    waiters = []
    self.assertEqual(pool.running(), 0)
    waiters.append(eventlet.spawn(waiter, pool))
    eventlet.sleep(0)
    self.assertEqual(pool.waiting(), 0)
    waiters.append(eventlet.spawn(waiter, pool))
    eventlet.sleep(0)
    self.assertEqual(pool.waiting(), 1)
    waiters.append(eventlet.spawn(waiter, pool))
    eventlet.sleep(0)
    self.assertEqual(pool.waiting(), 2)
    self.assertEqual(pool.running(), 1)
    done.send(None)
    for w in waiters:
        w.wait()
    self.assertEqual(pool.waiting(), 0)
    self.assertEqual(pool.running(), 0)
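# A distilled sketch of the GreenPool semantics the test above checks: with
# size=1 the first spawn runs immediately, and a further pool.spawn() call
# blocks the *caller* until a slot frees up; pool.waiting() counts callers
# blocked that way.
import eventlet
from eventlet import greenpool

def demo_pool_backpressure():
    pool = greenpool.GreenPool(1)
    pool.spawn(eventlet.sleep, 0.1)     # fills the single slot
    gt = pool.spawn(eventlet.sleep, 0)  # blocks here until the slot frees
    gt.wait()
    assert pool.running() == 0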
def replicate(self):
    """Run a replication pass"""
    self.start = time.time()
    self.suffix_count = 0
    self.suffix_sync = 0
    self.suffix_hash = 0
    self.replication_count = 0
    self.last_replication_count = -1
    self.partition_times = []
    stats = eventlet.spawn(self.heartbeat)
    lockup_detector = eventlet.spawn(self.detect_lockups)
    eventlet.sleep()  # Give spawns a cycle
    try:
        self.run_pool = GreenPool(size=self.concurrency)
        jobs = self.collect_jobs()
        for job in jobs:
            if not self.check_ring():
                self.logger.info(_("Ring change detected. Aborting "
                                   "current replication pass."))
                return
            if job['delete']:
                self.run_pool.spawn(self.update_deleted, job)
            else:
                self.run_pool.spawn(self.update, job)
        with Timeout(self.lockup_timeout):
            self.run_pool.waitall()
    except (Exception, Timeout):
        self.logger.exception(_("Exception in top-level replication loop"))
        self.kill_coros()
    finally:
        stats.kill()
        lockup_detector.kill()
        self.stats_line()
def initialize(self):
    # Extend extension to service mapping dict
    p_const.EXT_TO_SERVICE_MAPPING['cisco_n1kv_profile'] = (
        n1kv_const.CISCO_N1KV)
    self.n1kvclient = n1kv_client.Client()
    self.sync_obj = n1kv_sync.N1kvSyncDriver(
        db_base_plugin_v2.NeutronDbPluginV2())
    eventlet.spawn(self.sync_obj.do_sync)

    # Get VLAN/VXLAN network profile names
    self.netp_name = {
        p_const.TYPE_VLAN: (
            cfg.CONF.ml2_cisco_n1kv.default_vlan_network_profile),
        p_const.TYPE_VXLAN: (
            cfg.CONF.ml2_cisco_n1kv.default_vxlan_network_profile)}
    # Ensure network profiles are created on the VSM
    try:
        self._ensure_network_profiles_created_on_vsm()
    except (n1kv_exc.VSMConnectionFailed, n1kv_exc.VSMError):
        LOG.error(_LE("VSM failed to create default network profiles."))

    self.vif_type = portbindings.VIF_TYPE_OVS
    self.vif_details = {portbindings.CAP_PORT_FILTER: True,
                        portbindings.OVS_HYBRID_PLUG: True}
    self.supported_network_types = [p_const.TYPE_VLAN, p_const.TYPE_VXLAN]
def test_published_concurrency(self):
    """Test concurrent access to the local queue of the rpc publisher."""
    publisher = rpc.RPCPublisher(network_utils.urlsplit('rpc://'))
    cast_context = mock.MagicMock()

    with mock.patch.object(publisher.rpc_client, 'prepare') as prepare:
        def fake_prepare_go(topic):
            return cast_context

        def fake_prepare_wait(topic):
            prepare.side_effect = fake_prepare_go
            # Sleep to simulate concurrency and allow other threads to work
            eventlet.sleep(0)
            return cast_context

        prepare.side_effect = fake_prepare_wait

        job1 = eventlet.spawn(publisher.publish_samples,
                              mock.MagicMock(), self.test_data)
        job2 = eventlet.spawn(publisher.publish_samples,
                              mock.MagicMock(), self.test_data)

        job1.wait()
        job2.wait()

    self.assertEqual('default', publisher.policy)
    self.assertEqual(2, len(cast_context.cast.mock_calls))
    self.assertEqual(0, len(publisher.local_queue))
def _attribschanged(self, nodeattribs, configmanager, **kwargs):
    if 'console.logging' in nodeattribs[self.node]:
        # decide whether logging changes how we react or not
        self._dologging = True
        logvalue = 'full'
        attributevalue = configmanager.get_node_attributes(
            (self.node,), ('console.logging',))
        try:
            logvalue = \
                attributevalue[self.node]['console.logging']['value']
        except KeyError:
            pass
        if logvalue in ('full', ''):
            # if the *only* thing to change is the log,
            # then let always on handle reconnect if needed,
            # since we want to avoid a senseless disconnect
            # if already connected
            # if other things change, then unconditionally reconnect
            onlylogging = len(nodeattribs[self.node]) == 1
            self._alwayson(doconnect=onlylogging)
            if onlylogging:
                return
        else:
            self._ondemand()
        if logvalue == 'none':
            self._dologging = False
    if not self._isondemand or self.livesessions:
        eventlet.spawn(self._connect)
def consume(self, sock):
    ipc_dir = CONF.rpc_zmq_ipc_dir

    # TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    msg_id, topic, style, in_msg = data
    topic = topic.split('.', 1)[0]

    LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

    if topic.startswith('fanout~') or topic.startswith('zmq_replies'):
        sock_type = zmq.PUB
    else:
        sock_type = zmq.PUSH

    if topic not in self.topic_proxy:
        def publisher(waiter):
            LOG.info(_("Creating proxy for topic: %s"), topic)

            try:
                out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                     (ipc_dir, topic),
                                     sock_type, bind=True)
            except RPCException:
                waiter.send_exception(*sys.exc_info())
                return

            self.topic_proxy[topic] = eventlet.queue.LightQueue(
                CONF.rpc_zmq_topic_backlog)
            self.sockets.append(out_sock)

            # It takes some time for a pub socket to open,
            # before we can have any faith in doing a send() to it.
            if sock_type == zmq.PUB:
                eventlet.sleep(.5)

            waiter.send(True)

            while True:
                data = self.topic_proxy[topic].get()
                out_sock.send(data)
                LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                          {'data': data})

        wait_sock_creation = eventlet.event.Event()
        eventlet.spawn(publisher, wait_sock_creation)

        try:
            wait_sock_creation.wait()
        except RPCException:
            LOG.error(_("Topic socket file creation failed."))
            return

    try:
        self.topic_proxy[topic].put_nowait(data)
        LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
                  {'data': data})
    except eventlet.queue.Full:
        LOG.error(_("Local per-topic backlog buffer full for topic "
                    "%(topic)s. Dropping message.") % {'topic': topic})
def test_proxy_disconnect_with_active_worker(container_factory,
                                             rabbit_manager, rabbit_config):
    """ Break the connection between a service's queue consumer and rabbit
    while the service has an in-flight rpc request (i.e. it is waiting
    on a reply).
    """
    # ExampleService is the target; ProxyService has the rpc_proxy
    proxy_container = container_factory(ProxyService, rabbit_config)
    example_container = container_factory(ExampleService, rabbit_config)

    proxy_container.start()

    # get proxyservice's queue consumer connection while we know it's the
    # only active connection
    vhost = rabbit_config['vhost']
    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert len(connections) == 1
    proxy_consumer_conn = connections[0]['name']

    example_container.start()

    # there should now be two connections:
    # 1. the queue consumer from proxyservice
    # 2. the queue consumer from exampleservice
    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert len(connections) == 2

    # disconnect proxyservice's queue consumer while its request is in-flight
    eventlet.spawn(disconnect_on_event, rabbit_manager, proxy_consumer_conn)

    with entrypoint_hook(proxy_container, 'entrypoint') as entrypoint:
        # we should receive a response after reconnection
        assert entrypoint('hello') == 'hello'

    connections = get_rabbit_connections(vhost, rabbit_manager)
    assert proxy_consumer_conn not in [
        conn['name'] for conn in connections]
def _child_process(self, server):
    # Setup child signal handlers differently
    def _sigterm(*args):
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        raise SignalExit(signal.SIGTERM)

    signal.signal(signal.SIGTERM, _sigterm)
    # Block SIGINT and let the parent send us a SIGTERM
    # signal.signal(signal.SIGINT, signal.SIG_IGN)
    # This differs from the behavior in nova in that we don't ignore it.
    # It allows the non-wsgi services to be terminated properly.
    signal.signal(signal.SIGINT, _sigterm)

    # Reopen the eventlet hub to make sure we don't share an epoll
    # fd with parent and/or siblings, which would be bad
    eventlet.hubs.use_hub()

    # Close write to ensure only parent has it open
    os.close(self.writepipe)
    # Create greenthread to watch for parent to close pipe
    eventlet.spawn(self._pipe_watcher)

    # Reseed random number generator
    random.seed()

    launcher = Launcher()
    launcher.run_server(server)
def test_get_connection_concurrency(self, mock_conn, mock_event):
    def get_conn_currency(host):
        host.get_connection().getLibVersion()

    def connect_with_block(*a, **k):
        # enough to allow another connect to run
        eventlet.sleep(0)
        self.connect_calls += 1
        return fakelibvirt.openAuth("qemu:///system",
                                    [[], lambda: 1, None], 0)

    def fake_register(*a, **k):
        self.register_calls += 1

    self.connect_calls = 0
    self.register_calls = 0
    mock_conn.side_effect = connect_with_block
    mock_event.side_effect = fake_register

    # call concurrently
    thr1 = eventlet.spawn(get_conn_currency, self.host)
    thr2 = eventlet.spawn(get_conn_currency, self.host)

    # let threads run
    eventlet.sleep(0)

    thr1.wait()
    thr2.wait()
    self.assertEqual(self.connect_calls, 1)
    self.assertEqual(self.register_calls, 1)
def _periodics_watchdog(self, callable_, activity, spacing, exc_info,
                        traceback=None):
    LOG.exception("The periodic %(callable)s failed with: %(exception)s", {
        'exception': ''.join(traceback_mod.format_exception(*exc_info)),
        'callable': reflection.get_callable_name(callable_)})
    # NOTE(milan): spawn new thread otherwise waiting would block
    eventlet.spawn(self.del_host)
def test_closure(self):
    def spam_to_me(address):
        sock = eventlet.connect(address)
        while True:
            try:
                sock.sendall(b'hello world')
            except socket.error as e:
                if get_errno(e) == errno.EPIPE:
                    return
                raise

    server = eventlet.listen(('127.0.0.1', 0))
    sender = eventlet.spawn(spam_to_me, server.getsockname())
    client, address = server.accept()
    server.close()

    def reader():
        try:
            while True:
                data = client.recv(1024)
                assert data
        except socket.error as e:
            # we get an EBADF because client is closed in the same process
            # (but a different greenthread)
            if get_errno(e) != errno.EBADF:
                raise

    def closer():
        client.close()

    reader = eventlet.spawn(reader)
    eventlet.spawn_n(closer)
    reader.wait()
    sender.wait()
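# A short sketch of the spawn/spawn_n distinction the test above relies on:
# spawn() returns a GreenThread whose wait() yields the function's result
# (or re-raises its exception), while spawn_n() returns a raw greenlet with
# no result to join on.
import eventlet

def demo_spawn_variants():
    gt = eventlet.spawn(lambda: 42)
    assert gt.wait() == 42                        # joins and returns the result
    eventlet.spawn_n(print, 'side effect only')   # fire-and-forget
    eventlet.sleep(0)                             # yield so it gets to run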
def start(self):
    self.should_run = True
    if self.thread is None:
        self.thread = eventlet.spawn(self._run)
def periodic_resync(self):
    """Spawn a thread to periodically resync the dhcp state."""
    eventlet.spawn(self._periodic_resync_helper)
def start_ready_ports_loop(self):
    """Spawn a thread to push changed ports to server."""
    eventlet.spawn(self._dhcp_ready_ports_loop)
def __init__(self, host):
    self._init_ha_conf_path()
    super(AgentMixin, self).__init__(host)
    self.state_change_notifier = batch_notifier.BatchNotifier(
        self._calculate_batch_duration(), self.notify_server)
    eventlet.spawn(self._start_keepalived_notifications_server)
def test_state_wrapping(self):
    # Test that we behave correctly if a thread waits, and the server state
    # has wrapped when it is next scheduled

    # Ensure that if 2 threads wait for the completion of 'start', the
    # first will wait until complete_event is signalled, but the second
    # will continue
    complete_event = threading.Event()
    complete_waiting_callback = threading.Event()

    start_state = self.server._states['start']
    old_wait_for_completion = start_state.wait_for_completion
    waited = [False]

    def new_wait_for_completion(*args, **kwargs):
        if not waited[0]:
            waited[0] = True
            complete_waiting_callback.set()
            complete_event.wait()
        old_wait_for_completion(*args, **kwargs)

    start_state.wait_for_completion = new_wait_for_completion

    # thread1 will wait for start to complete until we signal it
    thread1 = eventlet.spawn(self.server.stop)
    thread1_finished = threading.Event()
    thread1.link(lambda _: thread1_finished.set())

    self.server.start()
    complete_waiting_callback.wait()

    # The server should have started, but stop should not have been called
    self.assertEqual(1, len(self.executors))
    self.assertEqual(self.executors[0]._calls, [])
    self.assertFalse(thread1_finished.is_set())

    self.server.stop()
    self.server.wait()

    # We should have gone through all the states, and thread1 should still
    # be waiting
    self.assertEqual(1, len(self.executors))
    self.assertEqual(self.executors[0]._calls, ['shutdown'])
    self.assertFalse(thread1_finished.is_set())

    # Start again
    self.server.start()

    # We should now record 4 executors (2 for each server)
    self.assertEqual(2, len(self.executors))
    self.assertEqual(self.executors[0]._calls, ['shutdown'])
    self.assertEqual(self.executors[1]._calls, [])
    self.assertFalse(thread1_finished.is_set())

    # Allow thread1 to complete
    complete_event.set()
    thread1_finished.wait()

    # thread1 should now have finished, and stop should not have been
    # called again on either the first or second executor
    self.assertEqual(2, len(self.executors))
    self.assertEqual(self.executors[0]._calls, ['shutdown'])
    self.assertEqual(self.executors[1]._calls, [])
    self.assertTrue(thread1_finished.is_set())
def test_wait_for_running_task(self):
    # Test that if 2 threads call a method simultaneously, both will wait,
    # but only 1 will call the underlying executor method.

    start_event = threading.Event()
    finish_event = threading.Event()

    running_event = threading.Event()
    done_event = threading.Event()

    _runner = [None]

    class SteppingFakeExecutor(self.server._executor_cls):
        def __init__(self, *args, **kwargs):
            # Tell the test which thread won the race
            _runner[0] = eventlet.getcurrent()
            running_event.set()

            start_event.wait()
            super(SteppingFakeExecutor, self).__init__(*args, **kwargs)
            done_event.set()

            finish_event.wait()

    self.server._executor_cls = SteppingFakeExecutor

    start1 = eventlet.spawn(self.server.start)
    start2 = eventlet.spawn(self.server.start)

    # Wait until one of the threads starts running
    running_event.wait()
    runner = _runner[0]
    # the waiter is whichever thread did *not* win the race
    waiter = start2 if runner == start1 else start1

    waiter_finished = threading.Event()
    waiter.link(lambda _: waiter_finished.set())

    # At this point, runner is running start(), and waiter is waiting for
    # it to complete. runner has not yet logged anything.
    self.assertEqual(0, len(self.executors))
    self.assertFalse(waiter_finished.is_set())

    # Let the runner log the call
    start_event.set()
    done_event.wait()

    # We haven't signalled completion yet, so submit shouldn't have run
    self.assertEqual(1, len(self.executors))
    self.assertEqual(self.executors[0]._calls, [])
    self.assertFalse(waiter_finished.is_set())

    # Let the runner complete
    finish_event.set()
    waiter.wait()
    runner.wait()

    # Check that both threads have finished, start was only called once,
    # and execute ran
    self.assertTrue(waiter_finished.is_set())
    self.assertEqual(1, len(self.executors))
    self.assertEqual(self.executors[0]._calls, [])
                data_s = f'(User {user} connected)\n'
                p.send(bytearray(data_s, 'utf-8'))
            except socket.error as e:
                # ignore broken pipes, they just mean the participant
                # closed its connection already
                # (Py3 fix: socket.error is not indexable, so compare
                # e.errno against errno.EPIPE instead of `e[0] != 32`;
                # requires `import errno`)
                if e.errno != errno.EPIPE:
                    raise
        data = conn.recv(1024)
    participants.remove(conn)
    for p in participants:
        p.send(bytearray(f'(User {user} leave)\n', 'utf-8'))
    print(f"Participant {user} left chat.")


if __name__ == "__main__":
    port = 3490
    try:
        print(f"ChatServer starting up on port {port}")
        server = eventlet.listen(('0.0.0.0', port))
        while True:
            new_connection, address = server.accept()
            print(f"Participant {address} joined chat.")
            participants.add(new_connection)
            eventlet.spawn(new_chat_channel, new_connection)
    except (KeyboardInterrupt, SystemExit):
        print("ChatServer exiting.")
        # Please note that an assertion here doesn't abort the server.
        items = line1.split(' ')
        assert 3 == len(items)
        assert items[2] in ('HTTP/1.0', 'HTTP/1.1')
        assert items[0] == 'GET'
        assert items[1].startswith('/')
        try:
            num = int(items[1][1:])
        except ValueError:
            num = None

        # Write HTTP response.
        if num is None:
            writer.write('HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n')
            writer.write('<a href="/0">start at 0</a><p>Hello, World!\n')
        else:
            next_num = lprng.Lprng(num).next()
            writer.write('HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n')
            writer.write('<a href="/%d">continue with %d</a>\n' %
                         (next_num, next_num))


if __name__ == '__main__':
    server = eventlet.listen(('127.0.0.1', 8080), backlog=128)
    print >> sys.stderr, 'info: listening on %r' % (server.getsockname(),)
    while True:
        new_connection, peer_name = server.accept()
        print >> sys.stderr, 'info: connection from %r' % (peer_name,)
        eventlet.spawn(Worker, new_connection)
        new_connection = None
        frames = {
            1: frame_string1,
            2: frame_string2,
            3: frame_string3,
            4: frame_string4,
            5: frame_string5,
            6: frame_string6,
            7: frame_string7,
            8: frame_string8,
        }
        # socket.emit('image', frame_string4)
        # ret, frame_encoded = cv2.imencode('.jpg', frame)
        # frame_string = base64.b64encode(frame_encoded).decode('utf8')
        socket.emit('image', json.dumps(frames))
        hz = cap.get(cv2.CAP_PROP_FPS)
        # print(hz)
        eventlet.sleep(max(0, (1 / hz) - (time.time() - time_s)))
    # cap.close()
    print('closing')


if __name__ == '__main__':
    eventlet.spawn(sending)
    try:
        socket.run(app, host='127.0.0.1')
    except KeyboardInterrupt:
        print("here")
    finally:
        kill_all = True
        print("done")
def activateThread(self, functionName, name, listOfLocations):
    if functionName == "flight":
        eventlet.spawn(self.flight, listOfLocations, name)
def spawn_accepts():
    events = []
    for _junk in xrange(2):
        sock, addr = bindsock.accept()
        events.append(spawn(accept, sock, addr, 201))
    return events
def consume(self, sock):
    ipc_dir = CONF.rpc_zmq_ipc_dir

    # TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    topic = data[1]

    LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

    if topic.startswith('fanout~'):
        sock_type = zmq.PUB
        topic = topic.split('.', 1)[0]
    elif topic.startswith('zmq_replies'):
        sock_type = zmq.PUB
    else:
        sock_type = zmq.PUSH

    if topic not in self.topic_proxy:
        def publisher(waiter):
            LOG.info(_("Creating proxy for topic: %s"), topic)

            try:
                # The topic is received over the network,
                # don't trust this input.
                if self.badchars.search(topic) is not None:
                    emsg = _("Topic contained dangerous characters.")
                    LOG.warn(emsg)
                    raise RPCException(emsg)

                out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                     (ipc_dir, topic),
                                     sock_type, bind=True)
            except RPCException:
                waiter.send_exception(*sys.exc_info())
                return

            self.topic_proxy[topic] = eventlet.queue.LightQueue(
                CONF.rpc_zmq_topic_backlog)
            self.sockets.append(out_sock)

            # It takes some time for a pub socket to open,
            # before we can have any faith in doing a send() to it.
            if sock_type == zmq.PUB:
                eventlet.sleep(.5)

            waiter.send(True)

            while True:
                data = self.topic_proxy[topic].get()
                out_sock.send(data)
                LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                          {'data': data})

        wait_sock_creation = eventlet.event.Event()
        eventlet.spawn(publisher, wait_sock_creation)

        try:
            wait_sock_creation.wait()
        except RPCException:
            LOG.error(_("Topic socket file creation failed."))
            return

    try:
        self.topic_proxy[topic].put_nowait(data)
        LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
                  {'data': data})
    except eventlet.queue.Full:
        LOG.error(_("Local per-topic backlog buffer full for topic "
                    "%(topic)s. Dropping message.") % {'topic': topic})
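# A minimal sketch (names are illustrative) of the "waiter" handshake used in
# consume() above: the caller blocks on an Event until the spawned publisher
# either signals readiness with send() or forwards its failure with
# send_exception(), which wait() re-raises in the caller.
import sys
import eventlet
from eventlet.event import Event

def spawn_and_wait_ready(setup, run):
    ready = Event()

    def worker():
        try:
            resource = setup()
        except Exception:
            ready.send_exception(*sys.exc_info())
            return
        ready.send(True)
        run(resource)

    eventlet.spawn(worker)
    ready.wait()  # raises here if setup() failed in the worker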
def schedule_thread(self, thread):
    import eventlet
    g = eventlet.spawn(thread)
    # yield control so the new greenthread gets a chance to start
    eventlet.sleep(0)
    return g
class TestContainerUpdater(unittest.TestCase):

    def setUp(self):
        utils.HASH_PATH_SUFFIX = 'endcap'
        utils.HASH_PATH_PREFIX = 'startcap'
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_updater')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        ring_file = os.path.join(self.testdir, 'account.ring.gz')
        with closing(GzipFile(ring_file, 'wb')) as f:
            pickle.dump(
                RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                         [{'id': 0, 'ip': '127.0.0.1', 'port': 12345,
                           'device': 'sda1', 'zone': 0},
                          {'id': 1, 'ip': '127.0.0.1', 'port': 12345,
                           'device': 'sda1', 'zone': 2}], 30),
                f)
        self.devices_dir = os.path.join(self.testdir, 'devices')
        os.mkdir(self.devices_dir)
        self.sda1 = os.path.join(self.devices_dir, 'sda1')
        os.mkdir(self.sda1)

    def tearDown(self):
        rmtree(os.path.dirname(self.testdir), ignore_errors=1)

    def test_creation(self):
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '2',
            'node_timeout': '5',
        })
        self.assert_(hasattr(cu, 'logger'))
        self.assert_(cu.logger is not None)
        self.assertEquals(cu.devices, self.devices_dir)
        self.assertEquals(cu.interval, 1)
        self.assertEquals(cu.concurrency, 2)
        self.assertEquals(cu.node_timeout, 5)
        self.assert_(cu.get_account_ring() is not None)

    def test_run_once(self):
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15',
            'account_suppression_time': 0,
        })
        cu.run_once()
        containers_dir = os.path.join(self.sda1, container_server.DATADIR)
        os.mkdir(containers_dir)
        cu.run_once()
        self.assert_(os.path.exists(containers_dir))
        subdir = os.path.join(containers_dir, 'subdir')
        os.mkdir(subdir)
        cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                             container='c')
        cb.initialize(normalize_timestamp(1))
        cu.run_once()
        info = cb.get_info()
        self.assertEquals(info['object_count'], 0)
        self.assertEquals(info['bytes_used'], 0)
        self.assertEquals(info['reported_object_count'], 0)
        self.assertEquals(info['reported_bytes_used'], 0)

        cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
                      '68b329da9893e34099c7d8ad5cb9c940')
        cu.run_once()
        info = cb.get_info()
        self.assertEquals(info['object_count'], 1)
        self.assertEquals(info['bytes_used'], 3)
        self.assertEquals(info['reported_object_count'], 0)
        self.assertEquals(info['reported_bytes_used'], 0)

        def accept(sock, addr, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEquals(inc.readline(),
                                      'PUT /sda1/0/a/c HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assert_('x-put-timestamp' in headers)
                    self.assert_('x-delete-timestamp' in headers)
                    self.assert_('x-object-count' in headers)
                    self.assert_('x-bytes-used' in headers)
            except BaseException, err:
                import traceback
                traceback.print_exc()
                return err
            return None

        bindsock = listen(('127.0.0.1', 0))

        def spawn_accepts():
            events = []
            for _junk in xrange(2):
                sock, addr = bindsock.accept()
                events.append(spawn(accept, sock, addr, 201))
            return events

        spawned = spawn(spawn_accepts)
        for dev in cu.get_account_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]
        cu.run_once()
        for event in spawned.wait():
            err = event.wait()
            if err:
                raise err
        info = cb.get_info()
        self.assertEquals(info['object_count'], 1)
        self.assertEquals(info['bytes_used'], 3)
        self.assertEquals(info['reported_object_count'], 1)
        self.assertEquals(info['reported_bytes_used'], 3)
def start(self):
    """Start serving a WSGI application.

    :returns: None
    :raises: meteos.exception.InvalidInput
    """
    # The server socket object will be closed after server exits,
    # but the underlying file descriptor will remain open, and will
    # give bad file descriptor error. So duplicating the socket object,
    # to keep file descriptor usable.
    config.set_middleware_defaults()
    dup_socket = self._socket.dup()

    netutils.set_tcp_keepalive(
        dup_socket,
        tcp_keepalive=CONF.tcp_keepalive,
        tcp_keepidle=CONF.tcp_keepidle,
        tcp_keepalive_interval=CONF.tcp_keepalive_interval,
        tcp_keepalive_count=CONF.tcp_keepalive_count)

    if self._use_ssl:
        try:
            ssl_kwargs = {
                'server_side': True,
                'certfile': CONF.ssl_cert_file,
                'keyfile': CONF.ssl_key_file,
                'cert_reqs': ssl.CERT_NONE,
            }
            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = CONF.ssl_ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            dup_socket = ssl.wrap_socket(dup_socket, **ssl_kwargs)
            dup_socket.setsockopt(socket.SOL_SOCKET,
                                  socket.SO_REUSEADDR, 1)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    "Failed to start %(name)s on %(_host)s:%(_port)s "
                    "with SSL support.",
                    {"name": self.name, "_host": self._host,
                     "_port": self._port})

    wsgi_kwargs = {
        'func': eventlet.wsgi.server,
        'sock': dup_socket,
        'site': self.app,
        'protocol': self._protocol,
        'custom_pool': self._pool,
        'log': self._logger,
        'socket_timeout': self.client_socket_timeout,
        'keepalive': CONF.wsgi_keep_alive,
    }

    self._server = eventlet.spawn(**wsgi_kwargs)
                import traceback
                traceback.print_exc()
                return err
            return None

        bindsock = listen(('127.0.0.1', 0))

        def spawn_accepts():
            events = []
            for _junk in xrange(2):
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    events.append(spawn(accept, sock, addr))
            return events

        spawned = spawn(spawn_accepts)
        for dev in cu.get_account_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]
        cu.run_once()
        for event in spawned.wait():
            err = event.wait()
            if err:
                raise err
        info = cb.get_info()
        self.assertEquals(info['object_count'], 1)
        self.assertEquals(info['bytes_used'], 3)
        self.assertEquals(info['reported_object_count'], 1)
        self.assertEquals(info['reported_bytes_used'], 3)
def test_global(self):
    lst = [1]
    eventlet.spawn(hubs.get_hub().schedule_call_global, DELAY, lst.pop)
    eventlet.sleep(0)
    eventlet.sleep(DELAY * 2)
    assert lst == [], lst
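# A hedged sketch of the hub timer API the test above exercises:
# schedule_call_global() registers a callback to fire after a delay, and the
# timer keeps running even if the greenthread that scheduled it exits first
# (unlike schedule_call_local()).
import eventlet
from eventlet import hubs

def demo_schedule_call_global():
    fired = []
    timer = hubs.get_hub().schedule_call_global(0.01, fired.append, 'ping')
    eventlet.sleep(0.05)  # yield to the hub long enough for the timer to fire
    assert fired == ['ping']
    timer.cancel()        # cancelling an already-fired timer is a no-op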
def start(self):
    """Start serving a WSGI application.

    :returns: None
    """
    # The server socket object will be closed after server exits,
    # but the underlying file descriptor will remain open, and will
    # give bad file descriptor error. So duplicating the socket object,
    # to keep file descriptor usable.
    dup_socket = self._socket.dup()
    dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # sockets can hang around forever without keepalive
    dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    # This option isn't available in the OS X version of eventlet
    if hasattr(socket, 'TCP_KEEPIDLE'):
        dup_socket.setsockopt(socket.IPPROTO_TCP,
                              socket.TCP_KEEPIDLE,
                              CONF.tcp_keepidle)

    if self._use_ssl:
        try:
            ca_file = CONF.ssl_ca_file
            cert_file = CONF.ssl_cert_file
            key_file = CONF.ssl_key_file

            if cert_file and not os.path.exists(cert_file):
                raise RuntimeError(
                    _("Unable to find cert_file : %s") % cert_file)

            if ca_file and not os.path.exists(ca_file):
                raise RuntimeError(
                    _("Unable to find ca_file : %s") % ca_file)

            if key_file and not os.path.exists(key_file):
                raise RuntimeError(
                    _("Unable to find key_file : %s") % key_file)

            if self._use_ssl and (not cert_file or not key_file):
                raise RuntimeError(
                    _("When running server in SSL mode, you must "
                      "specify both a cert_file and key_file "
                      "option value in your configuration file"))

            ssl_kwargs = {
                'server_side': True,
                'certfile': cert_file,
                'keyfile': key_file,
                'cert_reqs': ssl.CERT_NONE,
            }
            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            dup_socket = eventlet.wrap_ssl(dup_socket, **ssl_kwargs)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to start %(name)s on %(host)s"
                              ":%(port)s with SSL support"),
                          {'name': self.name, 'host': self.host,
                           'port': self.port})

    wsgi_kwargs = {
        'func': eventlet.wsgi.server,
        'sock': dup_socket,
        'site': self.app,
        'protocol': self._protocol,
        'custom_pool': self._pool,
        'log': self._wsgi_logger,
        'log_format': CONF.wsgi_log_format,
        'debug': False,
        'keepalive': CONF.wsgi_keep_alive,
        'socket_timeout': self.client_socket_timeout,
    }

    if self._max_url_len:
        wsgi_kwargs['url_length_limit'] = self._max_url_len

    self._server = eventlet.spawn(**wsgi_kwargs)
def in_process_setup(the_object_server=object_server):
    _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
    _info('Using object_server class: %s' % the_object_server.__name__)
    conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')

    if conf_src_dir is not None:
        if not os.path.isdir(conf_src_dir):
            msg = 'Config source %s is not a dir' % conf_src_dir
            raise InProcessException(msg)
        _info('Using config source dir: %s' % conf_src_dir)

    # If SWIFT_TEST_IN_PROCESS_CONF_DIR specifies a config source dir then
    # prefer config files from there, otherwise read config from source tree
    # sample files. A mixture of files from the two sources is allowed.
    proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
    _info('Using proxy config from %s' % proxy_conf)
    swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
    _info('Using swift config from %s' % swift_conf_src)

    monkey_patch_mimetools()

    global _testdir
    _testdir = os.path.join(mkdtemp(), 'tmp_functional')
    utils.mkdirs(_testdir)
    rmtree(_testdir)
    utils.mkdirs(os.path.join(_testdir, 'sda1'))
    utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))

    swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
    obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)

    global orig_swift_conf_name
    orig_swift_conf_name = utils.SWIFT_CONF_FILE
    utils.SWIFT_CONF_FILE = swift_conf
    constraints.reload_constraints()
    storage_policy.SWIFT_CONF_FILE = swift_conf
    storage_policy.reload_storage_policies()
    global config
    if constraints.SWIFT_CONSTRAINTS_LOADED:
        # Use the swift constraints that are loaded for the test framework
        # configuration
        _c = dict((k, str(v))
                  for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
        config.update(_c)
    else:
        # In-process swift constraints were not loaded, something's wrong
        raise SkipTest
    global orig_hash_path_suff_pref
    orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
    utils.validate_hash_conf()

    # We create the proxy server listening socket to get its port number so
    # that we can add it as the "auth_port" value for the functional test
    # clients.
    prolis = eventlet.listen(('localhost', 0))

    # The following set of configuration values is used both for the
    # functional test framework and for the various proxy, account, container
    # and object servers.
    config.update({
        # Values needed by the various in-process swift servers
        'devices': _testdir,
        'swift_dir': _testdir,
        'mount_check': 'false',
        'client_timeout': '4',
        'allow_account_management': 'true',
        'account_autocreate': 'true',
        'allow_versions': 'True',
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_host': '127.0.0.1',
        'auth_port': str(prolis.getsockname()[1]),
        'auth_ssl': 'no',
        'auth_prefix': '/auth/',
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',
        'username': '******',
        'password': '******',
        # User on a second account (needs admin access to the account)
        'account2': 'test2',
        'username2': 'tester2',
        'password2': 'testing2',
        # User on same account as first, but without admin access
        'username3': 'tester3',
        'password3': 'testing3',
        # Service user and prefix (emulates glance, cinder, etc. user)
        'account5': 'test5',
        'username5': 'tester5',
        'password5': 'testing5',
        'service_prefix': 'SERVICE',
        # For tempauth middleware. Update reseller_prefix
        'reseller_prefix': 'AUTH, SERVICE',
        'SERVICE_require_group': 'service',
    })

    acc1lis = eventlet.listen(('localhost', 0))
    acc2lis = eventlet.listen(('localhost', 0))
    con1lis = eventlet.listen(('localhost', 0))
    con2lis = eventlet.listen(('localhost', 0))

    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(
            ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                          [{'id': 0, 'zone': 0, 'device': 'sda1',
                            'ip': '127.0.0.1',
                            'port': acc1lis.getsockname()[1]},
                           {'id': 1, 'zone': 1, 'device': 'sdb1',
                            'ip': '127.0.0.1',
                            'port': acc2lis.getsockname()[1]}], 30),
            f)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(
            ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                          [{'id': 0, 'zone': 0, 'device': 'sda1',
                            'ip': '127.0.0.1',
                            'port': con1lis.getsockname()[1]},
                           {'id': 1, 'zone': 1, 'device': 'sdb1',
                            'ip': '127.0.0.1',
                            'port': con2lis.getsockname()[1]}], 30),
            f)

    eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
    logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
    # Redirect logging other messages by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    # Default to only 4 seconds for in-process functional test runs
    eventlet.wsgi.WRITE_TIMEOUT = 4

    acc1srv = account_server.AccountController(
        config, logger=debug_logger('acct1'))
    acc2srv = account_server.AccountController(
        config, logger=debug_logger('acct2'))
    con1srv = container_server.ContainerController(
        config, logger=debug_logger('cont1'))
    con2srv = container_server.ContainerController(
        config, logger=debug_logger('cont2'))
    objsrvs = [
        (obj_sockets[index],
         the_object_server.ObjectController(
             config, logger=debug_logger('obj%d' % (index + 1))))
        for index in range(len(obj_sockets))
    ]

    logger = debug_logger('proxy')

    def get_logger(name, *args, **kwargs):
        return logger

    with mock.patch('swift.common.utils.get_logger', get_logger):
        with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
                        FakeMemcacheMiddleware):
            try:
                app = loadapp(proxy_conf, global_conf=config)
            except Exception as e:
                raise InProcessException(e)

    nl = utils.NullLogger()
    prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
    acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
    con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
    con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
    objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
              for objsrv in objsrvs]

    global _test_coros
    _test_coros = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)

    # Create accounts "test" and "test2"
    def create_account(act):
        ts = utils.normalize_timestamp(time())
        account_ring = Ring(_testdir, ring_name='account')
        partition, nodes = account_ring.get_nodes(act)
        for node in nodes:
            # Note: we are just using the http_connect method in the object
            # controller here to talk to the account server nodes.
            conn = swift.proxy.controllers.obj.http_connect(
                node['ip'], node['port'], node['device'], partition, 'PUT',
                '/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
            resp = conn.getresponse()
            assert resp.status == 201

    create_account('AUTH_test')
    create_account('AUTH_test2')
def run():
    parser = argparse.ArgumentParser(
        description='Open a public HTTP tunnel to a local server')
    parser.add_argument('-s', dest='host', metavar='address',
                        default='v2.localtunnel.com',
                        help='localtunnel server address '
                             '(default: v2.localtunnel.com)')
    parser.add_argument('--version', action='store_true',
                        help='show version information for client and server')

    if '--version' in sys.argv:
        args = parser.parse_args()
        print "client: {}".format(VERSION)
        try:
            server_version = util.lookup_server_version(args.host)
        except:
            server_version = '??'
        print "server: {} ({})".format(server_version, args.host)
        sys.exit(0)

    parser.add_argument('-n', dest='name', metavar='name',
                        default=str(uuid.uuid4()).split('-')[-1],
                        help='name of the tunnel (default: randomly generate)')
    parser.add_argument('-c', dest='concurrency', type=int,
                        metavar='concurrency', default=3,
                        help='number of concurrent backend connections')
    parser.add_argument('port', metavar='port', type=int,
                        help='local port of server to tunnel to')
    args = parser.parse_args()

    host = args.host.split(':')
    if len(host) == 1:
        backend_port = util.discover_backend_port(host[0])
    else:
        backend_port = util.discover_backend_port(host[0], int(host[1]))
    backend = (host[0], backend_port)
    name = args.name
    client = util.client_name()
    port = args.port

    try:
        control = eventlet.connect(backend)
        control.sendall(protocol.VERSION)
        protocol.send_message(control,
                              protocol.control_request(
                                  name=name,
                                  client=client,
                              ))
        reply = protocol.recv_message(control)
        if reply and 'control' in reply:
            reply = reply['control']

            def maintain_proxy_backend_pool():
                pool = eventlet.greenpool.GreenPool(reply['concurrency'])
                while True:
                    pool.spawn_n(open_proxy_backend,
                                 backend, port, name, client)

            proxying = eventlet.spawn(maintain_proxy_backend_pool)

            print "  {0}".format(reply['banner'])
            print "  Port {0} is now accessible from http://{1} ...\n".format(
                port, reply['host'])

            try:
                while True:
                    message = protocol.recv_message(control)
                    assert message == protocol.control_ping()
                    protocol.send_message(control, protocol.control_pong())
            except (IOError, AssertionError):
                proxying.kill()

        elif reply and 'error' in reply:
            print "  ERROR: {0}".format(reply['message'])
        else:
            print "  ERROR: Unexpected server reply."
            print "  Make sure you have the latest version of the client."
    except KeyboardInterrupt:
        pass
        eventlet.sleep(1)
    # TCP_FASTOPEN
    sock.setsockopt(socket.SOL_TCP, 23, 5)
    try:
        eventlet.wsgi.server(sock, resourcehandler, log=False,
                             log_output=False, debug=False,
                             socket_timeout=60)
    except TypeError:
        # Older eventlet in place, skip arguments it does not understand
        eventlet.wsgi.server(sock, resourcehandler, log=False, debug=False)


class HttpApi(object):
    def __init__(self, bind_host=None, bind_port=None):
        self.server = None
        self.bind_host = bind_host or '::'
        self.bind_port = bind_port or 4005

    def start(self):
        global auditlog
        global tracelog
        tracelog = log.Logger('trace')
        auditlog = log.Logger('audit')
        self.server = eventlet.spawn(serve, self.bind_host, self.bind_port)


_cleaner = eventlet.spawn(_sessioncleaner)
def _wait_for_update():
    self.model.update.wait()
    self.send_index_update()

eventlet.spawn(_wait_for_update)
def spawn(self, *args, **kwargs):
    gthread = eventlet.spawn(*args, **kwargs)
    return gthread
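# Usage sketch for a thin spawn() wrapper like the one above: the returned
# GreenThread supports wait() to join on the result, link() to attach a
# completion callback (called with the finished greenthread), and kill()
# to abort it.
import eventlet

def demo_greenthread_handle():
    gt = eventlet.spawn(lambda x: x * 2, 21)
    gt.link(lambda g: print('finished with', g.wait()))
    assert gt.wait() == 42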
def start(self):
    global auditlog
    global tracelog
    tracelog = log.Logger('trace')
    auditlog = log.Logger('audit')
    self.server = eventlet.spawn(serve, self.bind_host, self.bind_port)
def _spawn_checking_thread(self):
    self._monitor_processes = True
    eventlet.spawn(self._periodic_checking_thread)
def get_console_output(self, data):
    # Spawn as a greenthread, return control as soon as possible
    # to the console object
    eventlet.spawn(self._handle_console_output, data)
def test_run_once(self, mock_ismount):
    mock_ismount.return_value = True
    cu = object_updater.ObjectUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    cu.run_once()
    async_dir = os.path.join(self.sda1, ASYNCDIR)
    os.mkdir(async_dir)
    cu.run_once()
    self.assert_(os.path.exists(async_dir))
    # mount_check == False means no call to ismount
    self.assertEqual([], mock_ismount.mock_calls)

    cu = object_updater.ObjectUpdater({
        'devices': self.devices_dir,
        'mount_check': 'TrUe',
        'swift_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    odd_dir = os.path.join(async_dir, 'not really supposed to be here')
    os.mkdir(odd_dir)
    cu.run_once()
    self.assert_(os.path.exists(async_dir))
    self.assert_(not os.path.exists(odd_dir))
    # mount_check == True means ismount was checked
    self.assertEqual([mock.call(self.sda1)], mock_ismount.mock_calls)

    ohash = hash_path('a', 'c', 'o')
    odir = os.path.join(async_dir, ohash[-3:])
    mkdirs(odir)
    older_op_path = os.path.join(
        odir, '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
    op_path = os.path.join(
        odir, '%s-%s' % (ohash, normalize_timestamp(time())))
    for path in (op_path, older_op_path):
        with open(path, 'wb') as async_pending:
            pickle.dump(
                {'op': 'PUT', 'account': 'a', 'container': 'c', 'obj': 'o',
                 'headers': {
                     'X-Container-Timestamp': normalize_timestamp(0)}},
                async_pending)
    cu.logger = FakeLogger()
    cu.run_once()
    self.assert_(not os.path.exists(older_op_path))
    self.assert_(os.path.exists(op_path))
    self.assertEqual(cu.logger.get_increment_counts(),
                     {'failures': 1, 'unlinks': 1})
    self.assertEqual(None, pickle.load(open(op_path)).get('successes'))

    bindsock = listen(('127.0.0.1', 0))

    def accepter(sock, return_code):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                          return_code)
                out.flush()
                self.assertEquals(inc.readline(),
                                  'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                headers = {}
                line = inc.readline()
                while line and line != '\r\n':
                    headers[line.split(':')[0].lower()] = \
                        line.split(':')[1].strip()
                    line = inc.readline()
                self.assert_('x-container-timestamp' in headers)
        except BaseException as err:
            return err
        return None

    def accept(return_codes):
        codes = iter(return_codes)
        try:
            events = []
            for x in xrange(len(return_codes)):
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    events.append(spawn(accepter, sock, codes.next()))
            for event in events:
                err = event.wait()
                if err:
                    raise err
        except BaseException as err:
            return err
        return None

    event = spawn(accept, [201, 500, 500])
    for dev in cu.get_container_ring().devs:
        if dev is not None:
            dev['port'] = bindsock.getsockname()[1]
    cu.logger = FakeLogger()
    cu.run_once()
    err = event.wait()
    if err:
        raise err
    self.assert_(os.path.exists(op_path))
    self.assertEqual(cu.logger.get_increment_counts(), {'failures': 1})
    self.assertEqual([0], pickle.load(open(op_path)).get('successes'))

    event = spawn(accept, [404, 500])
    cu.logger = FakeLogger()
    cu.run_once()
    err = event.wait()
    if err:
        raise err
    self.assert_(os.path.exists(op_path))
    self.assertEqual(cu.logger.get_increment_counts(), {'failures': 1})
    self.assertEqual([0, 1], pickle.load(open(op_path)).get('successes'))

    event = spawn(accept, [201])
    cu.logger = FakeLogger()
    cu.run_once()
    err = event.wait()
    if err:
        raise err
    self.assert_(not os.path.exists(op_path))
    self.assertEqual(cu.logger.get_increment_counts(),
                     {'unlinks': 1, 'successes': 1})
def _periodic_checking_thread(self):
    while self._monitor_processes:
        eventlet.sleep(self._config.AGENT.check_child_processes_interval)
        eventlet.spawn(self._check_child_processes)
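# A hedged sketch (names are illustrative, not from the agent above) of the
# periodic-check pattern: a long-lived greenthread sleeps for an interval,
# then fans each check out to its own greenthread so a slow check never
# delays the next cycle.
import eventlet

def run_periodically(check, interval, should_run=lambda: True):
    while should_run():
        eventlet.sleep(interval)
        eventlet.spawn(check)

# usage: eventlet.spawn(run_periodically, my_check, 5.0)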
def handle_quit(self, sig, frame):
    eventlet.spawn(super(EventletWorker, self).handle_quit, sig, frame)
def init(self, public_to_private_a, public_to_private_b):
    server = Server(self._controller_listen_port,
                    self._controller.get_ip(),
                    self._controller,
                    public_to_private_a,
                    public_to_private_b)
    eventlet.spawn(server.run)