def foo():
    """Switch to the parent and record any ValueError thrown back in."""
    current = greenlet.getcurrent()
    try:
        current.parent.switch()
    except ValueError:
        # Record the exception instance that was thrown into us.
        exc = sys.exc_info()[1]
        seen.append(exc)
    except greenlet.GreenletExit:
        # A GreenletExit here means the throw was mis-delivered.
        raise AssertionError
def start(self):
    """Configure yappi for greenlet-aware profiling and start it."""
    def _ctx_id():
        # Identify a profiling context by the current greenlet's id.
        return id(greenlet.getcurrent())

    def _ctx_name():
        # Name the context after the greenlet's class.
        return greenlet.getcurrent().__class__.__name__

    yappi.set_context_id_callback(_ctx_id)
    yappi.set_context_name_callback(_ctx_name)
    yappi.set_clock_type('cpu')
    yappi.start(builtins=True)
    self._isStart = True
    return 'success'
def pause(self, timeout=-1):
    """Suspend the current greenlet and switch to the hub's main loop.

    If *timeout* is non-negative, schedule a Timeout that will be
    switched back into us when it expires.  Returns whatever value the
    loop resumes us with; raises the Timeout if it fired, or Stop if
    the hub was stopped while we were paused.
    """
    if timeout > -1:
        # Arm a timeout entry; the loop will resume us with the Timeout
        # instance if it fires before anything else wakes us.
        item = self.scheduled.add(
            timeout, getcurrent(),
            vanilla.exception.Timeout('timeout: %s' % timeout))
    assert getcurrent() != self.loop, "cannot pause the main loop"
    resume = None
    try:
        resume = self.loop.switch()
    finally:
        if timeout > -1:
            if isinstance(resume, vanilla.exception.Timeout):
                raise resume
            # since we didn't timeout, remove ourselves from scheduled
            self.scheduled.remove(item)
        # TODO: rework State's is set test to be more natural
        if self.stopped.recver.ready:
            raise vanilla.exception.Stop(
                'Hub stopped while we were paused. There must be a deadlock.')
    return resume
def run_1():
    """First test greenlet: record progress markers around the lock."""
    states = self.states
    states.append(1)
    with self.lock:
        states.append(2)
        # Yield to the parent while still holding the lock.
        greenlet.getcurrent().parent.switch()
        states.append(4)
    states.append(5)
def run(self, duration):
    """Run the event loop for *duration*, accumulating wall-clock time."""
    me = greenlet.getcurrent()
    self.return_to = me
    # Schedule ourselves, which will bounce back here and return.
    self.schedule(me, duration)
    began = time.time()
    # If there are no events, the loop switches straight back.
    self.loop.switch()
    self.realtime += time.time() - began
def worker():
    """Collect weak refs for the main and two *finished* child greenlets."""
    me = greenlet.getcurrent()
    ll = me.ll = []

    def additional():
        ll.append(greenlet.getcurrent())

    # Run two short-lived children to completion.
    for _ in range(2):
        greenlet.greenlet(additional).switch()
    gg.append(weakref.ref(me))
def bar(): greenlet.getcurrent().parent.switch() # This final switch should go back to the main greenlet, since the # test_setparent() function in the C extension should have # reparented this greenlet. greenlet.getcurrent().parent.switch() raise AssertionError("Should never have reached this code")
def run_1():
    """Variant of run_1 using explicit acquire/release instead of `with`."""
    states = self.states
    states.append(1)
    self.lock.acquire()
    states.append(2)
    # Yield to the parent while holding the lock.
    greenlet.getcurrent().parent.switch()
    states.append(4)
    self.lock.release()
    states.append(5)
def install_rdo_repo(shell, config, info, messages):
    """Installs RDO release repo RPM on all deployment hosts and enables
    testing repo in case it is required.
    """
    global _RDO_REPO_VR
    if not config['repos/install_rdo']:
        return
    # parsing installed RDO release on localhost: we want to proceed with this
    # only once
    if not _RDO_REPO_VR:
        click.echo('Parsed RDO release version: ', nl=False)
        rc, out, err = execute(
            "rpm -q rdo-release --qf='%{version}-%{release}.%{arch}\n'",
            use_shell=True,
        )
        match = re.match(
            r'^(?P<version>\w+)\-(?P<release>\d+\.[\d\w]+)\n', out
        )
        # NOTE(review): match is None if rdo-release is not installed, which
        # would raise AttributeError here — confirm callers guarantee it.
        version, release = match.group('version'), match.group('release')
        _RDO_REPO_VR = (version, release)
        click.echo('{version}-{release}'.format(**locals()))
    else:
        version, release = _RDO_REPO_VR
    click.echo('Installing RDO release on host {0}'.format(shell.host))
    rdo_url = _RDO_REPO_URL.format(**locals())
    rc, out, err = shell.execute(
        '(rpm -q "rdo-release-{version}" || yum install -y --nogpg {rdo_url})'
        ' || true'.format(**locals())
    )
    # install RDO repo on all hosts first and then proceed with enabling
    # proper repo
    greenlet.getcurrent().parent.switch()
    click.echo('Enabling proper repo on host {0}'.format(shell.host))
    shell.execute('(rpm -q "yum-utils" || yum install -y yum-utils) || true')
    reponame = 'openstack-{}'.format(version)
    # NOTE(review): the branches below look inverted relative to the flag
    # name — when enable_rdo_testing is set, the *-testing repo is not the
    # one enabled.  Preserved as-is; confirm against the intended behavior.
    if config['repos/enable_rdo_testing']:
        cmd = 'yum-config-manager --enable {reponame}'
    else:
        cmd = (
            'yum-config-manager --disable {reponame}; '
            'yum-config-manager --enable {reponame}-testing'
        )
    rc, out, err = shell.execute(cmd.format(**locals()), can_fail=False)
    # Fix: use a raw string — '\s' is an invalid escape sequence in a
    # normal string literal on modern Python.
    match = re.search(r'enabled\s*=\s*(1|True)', out)
    if not match:
        msg = (
            'Failed to enable proper RDO repo on host {shell.host}:\n'
            'RPM file seems to be installed, but appropriate repo file '
            'is probably missing in /etc/yum.repos.d/'.format(**locals())
        )
        raise tht_exceptions.PluginShellRuntimeError(
            msg, cmd=cmd, rc=rc, stdout=out, stderr=err
        )
def acquire(self, blocking=True, timeout=None):
    """Acquire a semaphore

    This function behaves like :meth:`threading.Lock.acquire`.

    When invoked without arguments: if the internal counter is larger
    than zero on entry, decrement it by one and return immediately. If
    it is zero on entry, block, waiting until some other thread has
    called release() to make it larger than zero. This is done with
    proper interlocking so that if multiple acquire() calls are
    blocked, release() will wake exactly one of them up. The
    implementation may pick one at random, so the order in which
    blocked threads are awakened should not be relied on. There is no
    return value in this case.

    When invoked with blocking set to true, do the same thing as when
    called without arguments, and return true.

    When invoked with blocking set to false, do not block. If a call
    without an argument would block, return false immediately;
    otherwise, do the same thing as when called without arguments, and
    return true.
    """
    if not blocking and timeout is not None:
        raise ValueError('must not specify timeout for non-blocking acquire')
    if not blocking and self.locked():
        return False
    # A negative timeout means "wait forever", matching threading semantics.
    if isinstance(timeout, (float, int)) and timeout < 0:
        timeout = None
    if self.counter <= 0:
        # Register as a waiter so release() can find and wake us.
        self._waiters.add(greenlet.getcurrent())
        try:
            if timeout is not None:
                ok = False
                # Timeout(timeout, False) silently expires instead of raising.
                with Timeout(timeout, False):
                    while self.counter <= 0:
                        hubs.get_hub().switch()
                    ok = True
                if not ok:
                    return False
            else:
                while self.counter <= 0:
                    # running = hubs.get_hub().running
                    # if not running:
                    #     log.warn('Loop is no longer running, potential deadlock: (at {}) {}\n'
                    #              'waiters: {}'
                    #              .format(id(self), self, self._waiters))
                    #     return
                    hubs.get_hub().switch()
        finally:
            # Always deregister, even if the hub switch raised.
            self._waiters.discard(greenlet.getcurrent())
    self.counter -= 1
    return True
def run_2(frozen):
    """Second test greenlet: interleave timeouts with frozen-clock ticks."""
    me = greenlet.getcurrent()
    self.states.append(3)
    # the point of doing this as a timeout instead of a straight
    # callback is the order in which it would get called if the other
    # would resume
    iotimeout = time.time() + 2
    self.ioloop.add_timeout(iotimeout, me.switch)
    frozen.tick(delta=timedelta(seconds=4))
    me.parent.switch()
    self.states.append(4)
    frozen.tick(delta=timedelta(seconds=2))
    me.parent.switch()
def run_1(frozen):
    """First test greenlet: wait on the condition while holding the lock."""
    self.states.append(1)
    with self.lock:
        self.states.append(2)
        # Blocks until notified (or the 5-second timeout elapses).
        self.condition.wait(5)
        self.states.append(5)
        frozen.tick(delta=timedelta(seconds=90))
        self.ioloop.add_callback(self.ioloop.stop)
        # Hand control back; the parent is not expected to resume us.
        greenlet.getcurrent().parent.switch()
    # Guard: reaching here would mean the parent resumed us unexpectedly.
    raise Exception
def test_threaded_leak(self):
    """Greenlets created on worker threads must be collectable after the
    threads exit."""
    gg = []

    def worker():
        # only main greenlet present
        gg.append(weakref.ref(greenlet.getcurrent()))

    for _ in range(2):
        thread = threading.Thread(target=worker)
        thread.start()
        thread.join()
    greenlet.getcurrent()  # update ts_current
    gc.collect()
    # Every weak reference should now be dead.
    for ref in gg:
        self.assertTrue(ref() is None)
def readline(self, size, timeout=None):
    """Read a line of at most *size* bytes, optionally arming a timer
    that throws ``RpcError.Timeout`` into this greenlet."""
    assert getcurrent() != hub, 'could not call block func in main loop'
    if not timeout:
        return self._readline(size)
    assert not self.r_timer, 'duplicated r_timer'
    self.r_timer = timer(timeout)
    self.r_timer.start(getcurrent().throw, RpcError.Timeout(timeout))
    try:
        return self._readline(size)
    finally:
        # Always disarm the timer, even if the read raised.
        self.r_timer.stop()
        self.r_timer = None
def foo():
    """Spawn a child, then verify reparenting performed by the C extension."""
    def bar():
        greenlet.getcurrent().parent.switch()
        # This final switch should go back to the main greenlet, since
        # the test_setparent() function in the C extension should have
        # reparented this greenlet.
        greenlet.getcurrent().parent.switch()
        raise AssertionError("Should never have reached this code")

    child = greenlet.greenlet(bar)
    child.switch()
    me = greenlet.getcurrent()
    # Hand the child to our parent so it can be reparented.
    me.parent.switch(child)
    me.parent.throw(
        AssertionError("Should never reach this code"))
def get(self): """If a value/an exception is stored, return/raise it. Otherwise until switch() or throw() is called.""" if self._exception is not _NONE: if self._exception is None: return self.value else: getcurrent().throw(*self._exception) else: assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, ) self.greenlet = getcurrent() try: return self.hub.switch() finally: self.greenlet = None
def start(self): """Schedule the timeout. This is called on construction, so it should not be called explicitly, unless the timer has been canceled.""" assert not self.pending, \ '%r is already started; to restart it, cancel it first' % self if self.seconds is None: # "fake" timeout (never expires) self.timer = None elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self self.timer = get_hub().schedule_call_global( self.seconds, greenlet.getcurrent().throw, self) else: # regular timeout with user-provided exception self.timer = get_hub().schedule_call_global( self.seconds, greenlet.getcurrent().throw, self.exception) return self
def notifyall(self):
    """Wake every registered consumer by (re)starting its watcher."""
    handle = getcurrent().hub.handle
    for watch in self._consumers:
        # start_function just adds item into libev event loop
        # so we assume that ``wait()`` can't be called between iterations
        watch.start_function(handle, watch)
    self._consumers.clear()
def create_dict(self): """Create a new dict for the current greenlet, and return it.""" localdict = {} key = self.key greenlet = getcurrent() idt = id(greenlet) def local_deleted(_, key=key): # When the localimpl is deleted, remove the greenlet attribute. greenlet = wrgreenlet() if greenlet is not None: del greenlet.__dict__[key] def greenlet_deleted(_, idt=idt): # When the greenlet is deleted, remove the local dict. # Note that this is suboptimal if the greenlet object gets # caught in a reference loop. We would like to be called # as soon as the OS-level greenlet ends instead. local = wrlocal() if local is not None: local.dicts.pop(idt) wrlocal = ref(self, local_deleted) wrgreenlet = ref(greenlet, greenlet_deleted) greenlet.__dict__[key] = wrlocal self.dicts[idt] = wrgreenlet, localdict return localdict
def enter(self):
    # Remember the greenlet we came from so the loop can switch back.
    self._g_root = greenlet.getcurrent()
    self._g_loop = greenlet.greenlet(self._loop)
    self.message_log = MessageLog()
    self.level = Level(self, self.DUNGEON_SIZE_X, self.DUNGEON_SIZE_Y)
    self.game.window.push_handlers(self)
    # Hand control to the game loop greenlet.
    self._g_loop.switch()
def release(self):
    """Release the lock and schedule the next waiter, if any."""
    assert self.holder is greenlet.getcurrent(), 'must be held'
    self.holder = None
    if self.waiters:
        # FIFO hand-off: resume the longest-waiting greenlet via the loop.
        next_waiter = self.waiters.pop(0)
        self.io_loop.add_callback(next_waiter.switch)
def create_connection(self): """Copy of BasePool.connect() """ assert greenlet.getcurrent().parent, "Should be on child greenlet" host, port = self.pair # Don't try IPv6 if we don't support it. Also skip it if host # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET if socket.has_ipv6 and host != 'localhost': family = socket.AF_UNSPEC err = None for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): af, socktype, proto, dummy, sa = res green_sock = None try: sock = socket.socket(af, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) green_sock = GreenletSocket( sock, self.io_loop, use_ssl=self.use_ssl, pool_ref=weakref.proxy(self)) # GreenletSocket will pause the current greenlet and resume it # when connection has completed green_sock.settimeout(self.conn_timeout) green_sock.connect(sa) green_sock.settimeout(self.net_timeout) return green_sock except socket.error, e: err = e if green_sock is not None: green_sock.close()
def wait_fd(fd, read=True):
    """Block the calling greenlet until an event occurs on ``fd``.

    :param fd: file descriptor, or an object exposing ``fileno()``
    :param read: wait for a read event when ``True``, otherwise a write
        event.

    Must be invoked from a coroutine with a parent; calling it from the
    main greenlet raises.  See :func:`.psycopg2_wait_callback` for usage.
    """
    current = greenlet.getcurrent()
    parent = current.parent
    assert parent, '"wait_fd" must be called by greenlet with a parent'
    try:
        fileno = fd.fileno()
    except AttributeError:
        fileno = fd
    future = Future()
    # Register a callback that resumes this greenlet once fd is ready.
    register = future._loop.add_reader if read else future._loop.add_writer
    register(fileno, _done_wait_fd, fileno, future, read)
    # Hand control back to the parent until the event fires.
    parent.switch(future)
    return future.result()
def _green_run(self):
    # The run method of a worker greenlet
    task = True
    while task:
        green = getcurrent()
        parent = green.parent
        assert parent
        # add greenlet in the available greenlets
        self._available.add(green)
        # Park here until the main execution hands us a task (or None).
        task = parent.switch(_DONE)  # switch back to the main execution
        if task:
            future, func, args, kwargs = task
            try:
                result = func(*args, **kwargs)
            except Exception as exc:
                future.set_exception(exc)
            else:
                future.set_result(result)
        else:
            # Greenlet cleanup
            self._greenlets.remove(green)
            if self._greenlets:
                # Propagate shutdown to the next worker.
                self._put(None)
            elif self._waiter:
                # Last worker out: resolve the shutdown waiter.
                self._waiter.set_result(None)
                self._waiter = None
            parent.switch(_DONE)
def socket(self, *args, **kwargs):
    """Return a green Socket when called from a child greenlet; fall back
    to the original socket factory on the main greenlet."""
    main = greenlet.getcurrent().parent
    if main:
        return Socket(*args, **kwargs)
    return self.orig_socket(*args, **kwargs)
def join(self, timeout=None):
    """Wait for the event loop to finish.

    Exits only when there are no more spawned greenlets, started
    servers, active timeouts or watchers.  If *timeout* is provided,
    wait no longer than that many seconds.

    Returns True if the loop finished execution, False if the timeout
    expired first.
    """
    assert getcurrent() is self.parent, "only possible from the MAIN greenlet"
    if self.dead:
        return True

    waiter = Waiter()
    timer = None
    if timeout is not None:
        timer = self.loop.timer(timeout, ref=False)
        timer.start(waiter.switch)
    try:
        try:
            waiter.get()
        except LoopExit:
            # The loop ran out of work: normal completion.
            return True
    finally:
        if timer is not None:
            timer.stop()
    return False
def _setup(self):
    """Initialise profiler state and record the current tasklet/thread."""
    self._has_setup = True
    self.timings = {}
    self.cur = None
    self.current_tasklet = greenlet.getcurrent()
    self.thread_id = thread.get_ident()
    # Seed the call stack with a synthetic frame for the profiler itself.
    self.simulate_call("profiler")
def greenlet_fetch(request, **kwargs):
    """
    To use this function, it must be called (either directly or indirectly) from a method wrapped by the greenlet_asynchronous decorator.

    The request arg may be either a string URL or an HTTPRequest object.
    If it is a string, any additional kwargs will be passed directly to AsyncHTTPClient.fetch().

    Returns an HTTPResponse object, or raises a tornado.httpclient.HTTPError exception on error (such as a timeout, or a non-200 response).
    """
    cli = tornado.httpclient.AsyncHTTPClient(max_clients=100)
    cli.max_clients = 100
    gr = greenlet.getcurrent()
    assert gr.parent is not None, "greenlet_fetch() can only be called (possibly indirectly) from a RequestHandler method wrapped by the greenlet_asynchronous decorator."

    def callback(response):
        # Make sure we are on the master greenlet before we switch.
        tornado.ioloop.IOLoop.instance().add_callback(partial(gr.switch, response))
    cli.fetch(request, callback, **kwargs)
    # Now, yield control back to the master greenlet, and wait for data to be sent to us.
    response = gr.parent.switch()
    # Raise the exception, if any.
    if response.error:
        try:
            url = request.url
        except Exception,e:
            # request was a plain URL string, not an HTTPRequest.
            url = request
        logging.warning("Error: %s for url %s" % (response.error, url))
        response.rethrow()
def release(self):
    """Release the reentrant lock once; fully release when the recursion
    count reaches zero.

    Raises:
        RuntimeError: if the calling greenlet does not own the lock.
    """
    if self._owner is not greenlet.getcurrent():
        # Fix: message previously read "un-aquired"; match the spelling
        # used by threading.RLock ("cannot release un-acquired lock").
        raise RuntimeError("cannot release un-acquired lock")
    self._count = count = self._count - 1
    if not count:
        self._owner = None
        self._block.release()
def sendall(self, data):
    # Must run on a child greenlet so the IOLoop can resume us.
    assert greenlet.getcurrent().parent, "Should be on child greenlet"
    try:
        self.stream.write(data)
    except IOError, e:
        # PyMongo is built to handle socket.error here, not IOError
        raise socket.error(str(e))
def recv(s, msg):
    """Spin (yielding to the parent) until the send port is ready, then send."""
    while not s.send.rdy():
        # Not ready yet: hand control back to the scheduler with status 0.
        greenlet.getcurrent().parent.switch(0)
    assert s.send.rdy()
    s.send(msg)
def _get_thread_context():
    """Return a hash identifying the current thread (and greenlet, when
    the greenlet module is available)."""
    parts = [threading.current_thread()]
    if greenlet:
        parts.append(greenlet.getcurrent())
    return hash(tuple(parts))
def link(next_greenlet):
    """Receive a value from the parent, increment it, pass it along the chain."""
    received = greenlet.getcurrent().parent.switch()
    next_greenlet.switch(received + 1)
def wake_up(mid: int, result: Any) -> None:
    """Resume the handler greenlet registered under *mid* with *result*."""
    # pop() both fetches and removes the registration in one step.
    handler = state["handlers"].pop(mid)
    # Reparent so the handler returns here when it finishes.
    handler.parent = greenlet.getcurrent()
    handler.switch(result)
def monitor_current_greenlet_blocking(self):
    # Remember the running greenlet so the monitor can detect when it
    # blocks for too long.
    self.active_greenlet = getcurrent()
def cb(r):
    # Collect the result; if invoked from a different greenlet, resume
    # the original one with everything gathered so far.
    result.append(r)
    if greenlet.getcurrent() is not current:
        current.switch(result)
def __init__(self, sync_base: "SyncBase", future: "asyncio.Future[T]") -> None:
    """Bind *future* to the creating greenlet: when it completes, switch
    control back into that greenlet."""
    self._sync_base = sync_base
    self._future = future
    origin = greenlet.getcurrent()
    self._future.add_done_callback(lambda _: origin.switch())
def connect(self):
    """Open the MySQL connection on the IOLoop, driving the async connect
    from a child greenlet and resuming once connected (or failed)."""
    self._closed = False
    self._loop = IOLoop.current()
    try:
        if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.host_info = "Localhost via UNIX socket"
            address = self.unix_socket
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            if self.bind_address is not None:
                sock.bind((self.bind_address, 0))
            self.host_info = "socket %s:%d" % (self.host, self.port)
            address = (self.host, self.port)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        sock = IOStream(sock)
        sock.set_close_callback(self.stream_close_callback)
        child_gr = greenlet.getcurrent()
        main = child_gr.parent
        assert main is not None, "Execut must be running in child greenlet"
        self._loop_connect_timeout = None
        if self.connect_timeout:
            def timeout():
                # Fires if the connect did not finish in time.
                self._loop_connect_timeout = None
                if not self._sock:
                    sock.close((None, IOError("Connect timeout"), None))
            self._loop_connect_timeout = self._loop.call_later(
                self.connect_timeout, timeout)

        def connected(future):
            # Cancel the pending connect-timeout, then resume the child
            # greenlet with either the error or success.
            if self._loop_connect_timeout:
                self._loop.remove_timeout(self._loop_connect_timeout)
                self._loop_connect_timeout = None
            if future._exc_info is not None:
                child_gr.throw(future.exception())
            else:
                self._sock = sock
                child_gr.switch()

        future = sock.connect(address)
        self._loop.add_future(future, connected)
        # Park here until connected() resumes us.
        main.switch()
        self._rfile = self._sock
        self._next_seq_id = 0
        self._get_server_information()
        self._request_authentication()
        if self.sql_mode is not None:
            c = self.cursor()
            c.execute("SET sql_mode=%s", (self.sql_mode, ))
        if self.init_command is not None:
            c = self.cursor()
            c.execute(self.init_command)
            self.commit()
        if self.autocommit_mode is not None:
            self.autocommit(self.autocommit_mode)
    except Exception as e:
        # Tear down the half-open socket before re-raising.
        if self._sock:
            self._rfile = None
            self._sock.close()
            self._sock = None
        exc = err.OperationalError(
            2003,
            "Can't connect to MySQL server on %s (%r)" % (
                self.unix_socket or ("%s:%s" % (self.host, self.port)), e))
        # Keep original exception and traceback to investigate error.
        exc.original_exception = e
        exc.traceback = traceback.format_exc()
        raise exc
def _request_authentication(self):
    """Perform the MySQL client authentication handshake, optionally
    upgrading the stream to TLS first."""
    if int(self.server_version.split('.', 1)[0]) >= 5:
        self.client_flag |= CLIENT.MULTI_RESULTS
    if self.user is None:
        raise ValueError("Did not specify a username")
    charset_id = charset_by_name(self.charset).id
    if isinstance(self.user, text_type):
        self.user = self.user.encode(self.encoding)
    data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
    if self.ssl and self.server_capabilities & CLIENT.SSL:
        # Send the initial packet, then switch the stream to TLS; the
        # child greenlet parks until start_tls completes.
        self.write_packet(data_init)
        child_gr = greenlet.getcurrent()
        main = child_gr.parent
        assert main is not None, "Execut must be running in child greenlet"

        def finish(future):
            if future._exc_info is not None:
                child_gr.throw(future.exception())
            else:
                child_gr.switch(future.result())

        future = self._sock.start_tls(False, self.ctx,
                                      server_hostname=self.host)
        self._loop.add_future(future, finish)
        self._rfile = self._sock = main.switch()
    data = data_init + self.user + b'\0'
    authresp = b''
    if self._auth_plugin_name in ('', 'mysql_native_password'):
        authresp = _scramble(self.password.encode('latin1'), self.salt)
    if self.server_capabilities & CLIENT.PLUGIN_AUTH_LENENC_CLIENT_DATA:
        data += lenenc_int(len(authresp)) + authresp
    elif self.server_capabilities & CLIENT.SECURE_CONNECTION:
        data += struct.pack('B', len(authresp)) + authresp
    else:  # pragma: no cover - not testing against servers without secure auth (>=5.0)
        data += authresp + b'\0'
    if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
        if isinstance(self.db, text_type):
            self.db = self.db.encode(self.encoding)
        data += self.db + b'\0'
    if self.server_capabilities & CLIENT.PLUGIN_AUTH:
        name = self._auth_plugin_name
        if isinstance(name, text_type):
            name = name.encode('ascii')
        data += name + b'\0'
    self.write_packet(data)
    auth_packet = self._read_packet()
    # if authentication method isn't accepted the first byte
    # will have the octet 254
    if auth_packet.is_auth_switch_request():
        # https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
        auth_packet.read_uint8()  # 0xfe packet identifier
        plugin_name = auth_packet.read_string()
        if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None:
            auth_packet = self._process_auth(plugin_name, auth_packet)
        else:
            # send legacy handshake
            data = _scramble_323(self.password.encode('latin1'),
                                 self.salt) + b'\0'
            self.write_packet(data)
            auth_packet = self._read_packet()
def _switch_4_return_5():
    """Hand 4 to the parent, then return 5 once resumed."""
    greenlet.getcurrent().parent.switch(4)
    return 5
def _switch_future():
    """Hand the future to the parent greenlet."""
    greenlet.getcurrent().parent.switch(future)
    return 5  # This should never get returned
def additional():
    # Record this (soon-to-finish) greenlet in the shared list.
    ll.append(greenlet.getcurrent())
def LOAD_IMAGE(url):
    """Ask the scheduler to load *url*; return whatever it sends back."""
    parent = greenlet.getcurrent().parent
    return parent.switch((TASK_REQUEST_LOADIMAGE, {'url': url}))
def ERROR(data=None):
    """Report an error to the scheduler and wait for its reply."""
    parent = greenlet.getcurrent().parent
    return parent.switch((TASK_REQUEST_ERROR, data))
def get(self):
    # Identify the execution context by the current greenlet's id.
    return id(greenlet.getcurrent())
def _get_thread_ident(self):
    # Use the greenlet's id as the "thread" identifier so each greenlet
    # is treated as its own context.
    return id(greenlet.getcurrent())
def SKIP(data=None):
    """Tell the scheduler to skip this task and wait for its reply."""
    parent = greenlet.getcurrent().parent
    return parent.switch((TASK_REQUEST_SKIP, data))
def sleep(timeout, create_timer):
    '''Yield execution for timeout seconds.'''
    current = greenlet.getcurrent()
    # The timer resumes us by calling current.switch when it fires.
    # NOTE(review): the binding looks unused but may keep the timer
    # object alive until this frame exits — confirm before removing.
    timer = create_timer(timeout, current.switch)
    current.parent.switch()
def DONE(data=None):
    """Report task completion to the scheduler and wait for its reply."""
    parent = greenlet.getcurrent().parent
    return parent.switch((TASK_REQUEST_DONE, data))
def test_greenlet_sockets_with_request(self):
    # Verify two assumptions: that start_request() with two greenlets and
    # the regular pool will fail, meaning that the two greenlets will
    # share one socket. Also check that start_request() with GreenletPool
    # succeeds, meaning that two greenlets will get different sockets (this
    # is exactly the reason for creating GreenletPool).
    try:
        import greenlet
    except ImportError:
        raise SkipTest('greenlet not installed')

    pool_args = dict(
        pair=(host, port),
        max_size=10,
        net_timeout=1000,
        conn_timeout=1000,
        use_ssl=False,
    )

    for pool_class, use_request, expect_success in [
        (pool.GreenletPool, True, True),
        (pool.GreenletPool, False, False),
        (pool.Pool, True, False),
        (pool.Pool, False, False),
    ]:
        cx_pool = pool_class(**pool_args)

        # Map: greenlet -> socket
        greenlet2socks = {}
        main = greenlet.getcurrent()

        def get_socket_in_request():
            # Get a socket from the pool twice, switching contexts each time
            if use_request:
                cx_pool.start_request()
            main.switch()
            for _ in range(2):
                sock = cx_pool.get_socket()
                cx_pool.return_socket(sock)
                greenlet2socks.setdefault(
                    greenlet.getcurrent(), []
                ).append(id(sock))
                main.switch()
            cx_pool.end_request()

        greenlets = [
            greenlet.greenlet(get_socket_in_request),
            greenlet.greenlet(get_socket_in_request),
        ]
        # Run both greenlets to completion
        looplet(greenlets)

        socks_for_gr0 = greenlet2socks[greenlets[0]]
        socks_for_gr1 = greenlet2socks[greenlets[1]]

        # Whether we expect requests to work or not, we definitely expect
        # greenlet2socks to have the same number of keys and values
        self.assertEqual(2, len(greenlet2socks))
        self.assertEqual(2, len(socks_for_gr0))
        self.assertEqual(2, len(socks_for_gr1))

        # If we started a request, then there was a point at which we had
        # 2 active sockets, otherwise we always used one.
        if use_request and pool_class is pool.GreenletPool:
            self.assertEqual(2, len(cx_pool.sockets))
        else:
            self.assertEqual(1, len(cx_pool.sockets))

        # Again, regardless of whether requests work, a greenlet will get
        # the same socket each time it calls get_socket() within a request.
        # What we're really testing is that the two *different* greenlets
        # get *different* sockets from each other.
        self.assertEqual(
            socks_for_gr0[0], socks_for_gr0[1],
            "Expected greenlet 0 to get the same socket for each call "
            "to get_socket()")
        self.assertEqual(
            socks_for_gr1[0], socks_for_gr1[1],
            "Expected greenlet 1 to get the same socket for each call "
            "to get_socket()")

        if expect_success:
            # We used the proper pool class, so start_request successfully
            # distinguished between the two greenlets.
            self.assertNotEqual(
                socks_for_gr0[0], socks_for_gr1[0],
                "Expected two greenlets to get two different sockets")
        else:
            # We used the wrong pool class, so start_request didn't
            # distinguish between the two greenlets, and it gave them both
            # the same socket.
            self.assertEqual(
                socks_for_gr0[0], socks_for_gr1[0],
                "Expected two greenlets to get same socket")
def wait():
    # Block on the shared event, then record which greenlet woke up and
    # in what order.
    event.wait()
    order.append(greenlet.getcurrent())
def _assert_not_in_coroutine():
    """Assert the caller is on the main greenlet (i.e. not inside a
    coroutine), unless the check is globally disabled."""
    if _disable_not_in_coroutine_assert:
        return
    current = greenlet.getcurrent()
    # Only the main greenlet has no parent.
    assert current.parent is None, 'Did not expect to be in a coroutine'
def resourcehandler_backend(env, start_response):
    """Function to handle new wsgi requests
    """
    mimetype, extension = _pick_mimetype(env)
    headers = [('Content-Type', mimetype), ('Cache-Control', 'no-store'),
               ('Pragma', 'no-cache'),
               ('X-Content-Type-Options', 'nosniff'),
               ('Content-Security-Policy', "default-src 'self'"),
               ('X-XSS-Protection', '1; mode=block'),
               ('X-Frame-Options', 'deny'),
               ('Strict-Transport-Security', 'max-age=86400'),
               ('X-Permitted-Cross-Domain-Policies', 'none')]
    reqbody = None
    reqtype = None
    # Self-service endpoints are delegated wholesale.
    if env.get('PATH_INFO', '').startswith('/self/'):
        for res in selfservice.handle_request(env, start_response):
            yield res
        return
    if 'CONTENT_LENGTH' in env and int(env['CONTENT_LENGTH']) > 0:
        reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
        reqtype = env['CONTENT_TYPE']
    operation = opmap[env['REQUEST_METHOD']]
    querydict = _get_query_dict(env, reqbody, reqtype)
    # Allow the REST explorer to override the operation for non-GETs.
    if operation != 'retrieve' and 'restexplorerop' in querydict:
        operation = querydict['restexplorerop']
        del querydict['restexplorerop']
    authorized = _authorize_request(env, operation)
    if 'logout' in authorized:
        start_response('200 Successful logout', headers)
        yield ('{"result": "200 - Successful logout"}')
        return
    if 'HTTP_SUPPRESSAUTHHEADER' in env or 'HTTP_CONFLUENTAUTHTOKEN' in env:
        badauth = [('Content-type', 'text/plain')]
    else:
        badauth = [('Content-type', 'text/plain'),
                   ('WWW-Authenticate', 'Basic realm="confluent"')]
    if authorized['code'] == 401:
        start_response('401 Authentication Required', badauth)
        yield 'authentication required'
        return
    if authorized['code'] == 403:
        start_response('403 Forbidden', badauth)
        yield 'Forbidden'
        return
    if authorized['code'] != 200:
        raise Exception("Unrecognized code from auth engine")
    headers.extend(
        ("Set-Cookie", m.OutputString())
        for m in authorized['cookie'].values())
    cfgmgr = authorized['cfgmgr']
    if (operation == 'create') and env['PATH_INFO'] == '/sessions/current/async':
        # Async long-poll channel for this session.
        pagecontent = ""
        try:
            for rsp in _assemble_json(
                    confluent.asynchttp.handle_async(
                        env, querydict,
                        httpsessions[authorized['sessionid']]['inflight'])):
                pagecontent += rsp
            start_response("200 OK", headers)
            if not isinstance(pagecontent, bytes):
                pagecontent = pagecontent.encode('utf-8')
            yield pagecontent
            return
        except exc.ConfluentException as e:
            if e.apierrorcode == 500:
                # raise generics to trigger the tracelog
                raise
            start_response('{0} {1}'.format(e.apierrorcode, e.apierrorstr),
                           headers)
            yield e.get_error_body()
    elif (env['PATH_INFO'].endswith('/forward/web') and
            env['PATH_INFO'].startswith('/nodes/')):
        # Web forwarding: redirect the browser to the node's BMC web UI.
        prefix, _, _ = env['PATH_INFO'].partition('/forward/web')
        _, _, nodename = prefix.rpartition('/')
        hm = cfgmgr.get_node_attributes(nodename, 'hardwaremanagement.manager')
        targip = hm.get(nodename, {}).get(
            'hardwaremanagement.manager', {}).get('value', None)
        if not targip:
            start_response('404 Not Found', headers)
            yield 'No hardwaremanagemnet.manager defined for node'
            return
        funport = forwarder.get_port(targip, env['HTTP_X_FORWARDED_FOR'],
                                     authorized['sessionid'])
        host = env['HTTP_X_FORWARDED_HOST']
        # Preserve a bracketed IPv6 host, otherwise strip any port.
        if ']' in host:
            host = host.split(']')[0] + ']'
        elif ':' in host:
            host = host.rsplit(':', 1)[0]
        url = 'https://{0}:{1}/'.format(host, funport)
        start_response('302', [('Location', url)])
        yield 'Our princess is in another castle!'
        return
    elif (operation == 'create' and
            ('/console/session' in env['PATH_INFO'] or
             '/shell/sessions/' in env['PATH_INFO'])):
        #hard bake JSON into this path, do not support other incarnations
        if '/console/session' in env['PATH_INFO']:
            prefix, _, _ = env['PATH_INFO'].partition('/console/session')
            shellsession = False
        elif '/shell/sessions/' in env['PATH_INFO']:
            prefix, _, _ = env['PATH_INFO'].partition('/shell/sessions')
            shellsession = True
        _, _, nodename = prefix.rpartition('/')
        if 'session' not in querydict.keys() or not querydict['session']:
            auditmsg = {
                'operation': 'start',
                'target': env['PATH_INFO'],
                'user': util.stringify(authorized['username']),
            }
            if 'tenant' in authorized:
                auditmsg['tenant'] = authorized['tenant']
            auditlog.log(auditmsg)
            # Request for new session
            skipreplay = False
            if 'skipreplay' in querydict and querydict['skipreplay']:
                skipreplay = True
            width = querydict.get('width', 80)
            height = querydict.get('height', 24)
            datacallback = None
            asynchdl = None
            if 'HTTP_CONFLUENTASYNCID' in env:
                asynchdl = confluent.asynchttp.get_async(env, querydict)
                termrel = asynchdl.set_term_relation(env)
                datacallback = termrel.got_data
            try:
                if shellsession:
                    consession = shellserver.ShellSession(
                        node=nodename, configmanager=cfgmgr,
                        username=authorized['username'],
                        skipreplay=skipreplay, datacallback=datacallback,
                        width=width, height=height)
                else:
                    consession = consoleserver.ConsoleSession(
                        node=nodename, configmanager=cfgmgr,
                        username=authorized['username'],
                        skipreplay=skipreplay, datacallback=datacallback,
                        width=width, height=height)
            except exc.NotFoundException:
                start_response("404 Not found", headers)
                yield "404 - Request Path not recognized"
                return
            if not consession:
                start_response("500 Internal Server Error", headers)
                return
            sessid = _assign_consessionid(consession)
            if asynchdl:
                asynchdl.add_console_session(sessid)
            start_response('200 OK', headers)
            yield '{"session":"%s","data":""}' % sessid
            return
        elif 'bytes' in querydict.keys():  # not keycodes...
            myinput = querydict['bytes']
            sessid = querydict['session']
            if sessid not in consolesessions:
                start_response('400 Expired Session', headers)
                return
            consolesessions[sessid]['expiry'] = time.time() + 90
            consolesessions[sessid]['session'].write(myinput)
            start_response('200 OK', headers)
            yield json.dumps({'session': querydict['session']})
            return  # client has requests to send or receive, not both...
        elif 'closesession' in querydict:
            consolesessions[querydict['session']]['session'].destroy()
            del consolesessions[querydict['session']]
            start_response('200 OK', headers)
            yield '{"sessionclosed": true}'
            return
        elif 'action' in querydict:
            if querydict['action'] == 'break':
                consolesessions[querydict['session']]['session'].send_break()
            elif querydict['action'] == 'resize':
                consolesessions[querydict['session']]['session'].resize(
                    width=querydict['width'], height=querydict['height'])
            elif querydict['action'] == 'reopen':
                consolesessions[querydict['session']]['session'].reopen()
            else:
                start_response('400 Bad Request')
                yield 'Unrecognized action ' + querydict['action']
                return
            start_response('200 OK', headers)
            yield json.dumps({'session': querydict['session']})
        else:  # no keys, but a session, means it's hooking to receive data
            sessid = querydict['session']
            if sessid not in consolesessions:
                start_response('400 Expired Session', headers)
                yield ''
                return
            consolesessions[sessid]['expiry'] = time.time() + 90
            # add our thread to the 'inflight' to have a hook to terminate
            # a long polling request
            loggedout = None
            mythreadid = greenlet.getcurrent()
            httpsessions[authorized['sessionid']]['inflight'].add(mythreadid)
            try:
                outdata = consolesessions[sessid]['session'].get_next_output(
                    timeout=25)
            except greenlet.GreenletExit as ge:
                # The session was logged out while we were long-polling.
                loggedout = ge
            httpsessions[authorized['sessionid']]['inflight'].discard(
                mythreadid)
            if sessid not in consolesessions:
                start_response('400 Expired Session', headers)
                yield ''
                return
            if loggedout is not None:
                consolesessions[sessid]['session'].destroy()
                start_response('401 Logged out', headers)
                yield '{"loggedout": 1}'
                return
            bufferage = False
            if 'stampsent' not in consolesessions[sessid]:
                consolesessions[sessid]['stampsent'] = True
                bufferage = consolesessions[sessid]['session'].get_buffer_age()
            if isinstance(outdata, dict):
                rspdata = outdata
                rspdata['session'] = querydict['session']
            else:
                rspdata = {'session': querydict['session'], 'data': outdata}
            if bufferage is not False:
                rspdata['bufferage'] = bufferage
            try:
                rsp = json.dumps(rspdata)
            except UnicodeDecodeError:
                try:
                    rsp = json.dumps(rspdata, encoding='cp437')
                except UnicodeDecodeError:
                    rsp = json.dumps({
                        'session': querydict['session'],
                        'data': 'DECODEERROR'
                    })
            start_response('200 OK', headers)
            yield rsp
            return
    else:
        # normal request
        url = env['PATH_INFO']
        url = url.replace('.json', '')
        url = url.replace('.html', '')
        if url == '/sessions/current/info':
            start_response('200 OK', headers)
            sessinfo = {'username': authorized['username']}
            if 'authtoken' in authorized:
                sessinfo['authtoken'] = authorized['authtoken']
            tlvdata.unicode_dictvalues(sessinfo)
            yield json.dumps(sessinfo)
            return
        resource = '.' + url[url.rindex('/'):]
        lquerydict = copy.deepcopy(querydict)
        try:
            hdlr = pluginapi.handle_path(url, operation, cfgmgr, querydict)
            if 'HTTP_CONFLUENTASYNCID' in env:
                # Hand the handler off to the async machinery.
                confluent.asynchttp.run_handler(hdlr, env)
                start_response('202 Accepted', headers)
                yield 'Request queued'
                return
            pagecontent = ""
            if mimetype == 'text/html':
                for datum in _assemble_html(hdlr, resource, lquerydict, url,
                                            extension):
                    pagecontent += datum
            else:
                for datum in _assemble_json(hdlr, resource, url, extension):
                    pagecontent += datum
            start_response('200 OK', headers)
            if not isinstance(pagecontent, bytes):
                pagecontent = pagecontent.encode('utf-8')
            yield pagecontent
        except exc.ConfluentException as e:
            if ((not isinstance(e, exc.LockedCredentials)) and
                    e.apierrorcode == 500):
                # raise generics to trigger the tracelog
                raise
            start_response('{0} {1}'.format(e.apierrorcode, e.apierrorstr),
                           headers)
            yield e.get_error_body()
def wait(value):
    """Wait for a possible asynchronous value to complete.

    If the current greenlet has a parent, hand `value` to it via switch()
    and return whatever comes back; otherwise `value` is already concrete
    and is returned as-is.
    """
    parent = getcurrent().parent
    # NOTE(review): deliberately a truthiness test, matching the original —
    # do not change to `is None` without confirming greenlet boolean semantics.
    if not parent:
        return value
    return parent.switch(value)
def stop_users(self, user_count, stop_rate=None):
    """
    Stop `user_count` weighted users at a rate of `stop_rate`

    Picks victims at random from the currently running user greenlets,
    matching them against the weighted class bucket, then stops them either
    gracefully (when a stop_timeout is configured) or forcefully.
    `stop_rate` is users-per-second; None (or >= user_count) stops them all
    at once.
    """
    # Nothing to do for an explicit zero count/rate.
    if user_count == 0 or stop_rate == 0:
        return
    # Weighted selection of *classes* to stop; actual count may differ
    # from the request once weighting is applied.
    bucket = self.weight_users(user_count)
    user_count = len(bucket)
    to_stop = []
    for user_greenlet in self.user_greenlets:
        try:
            # presumably the User instance is spawned as the greenlet's
            # first positional arg — TODO confirm against the spawn site
            user = user_greenlet.args[0]
        except IndexError:
            logger.error(
                "While stopping users, we encountered a user that didnt have proper args %s", user_greenlet)
            continue
        # Match each running user to at most one requested class slot.
        for user_class in bucket:
            if isinstance(user, user_class):
                to_stop.append(user)
                bucket.remove(user_class)
                break
    if not to_stop:
        return
    if stop_rate is None or stop_rate >= user_count:
        # No ramp-down: stop everything back-to-back.
        sleep_time = 0
        logger.info("Stopping %i users" % (user_count))
    else:
        # Spread the stops evenly to achieve `stop_rate` users/second.
        sleep_time = 1.0 / stop_rate
        logger.info("Stopping %i users at rate of %g users/s" % (user_count, stop_rate))
    async_calls_to_stop = Group()
    stop_group = Group()
    while True:
        # Stop victims in random order.
        user_to_stop: User = to_stop.pop(
            random.randint(0, len(to_stop) - 1))
        logger.debug("Stopping %s" % user_to_stop._greenlet.name)
        if user_to_stop._greenlet is greenlet.getcurrent():
            # User called runner.quit(), so dont block waiting for killing to finish"
            user_to_stop._group.killone(user_to_stop._greenlet, block=False)
        elif self.environment.stop_timeout:
            # Graceful stop (force=False); track the greenlet so we can
            # wait for it to finish its current task below.
            async_calls_to_stop.add(
                gevent.spawn_later(0, user_to_stop.stop, force=False))
            stop_group.add(user_to_stop._greenlet)
        else:
            # No stop_timeout configured: kill immediately.
            async_calls_to_stop.add(
                gevent.spawn_later(0, user_to_stop.stop, force=True))
        if to_stop:
            gevent.sleep(sleep_time)
        else:
            break
    # Wait for all the spawned stop() calls to have been issued.
    async_calls_to_stop.join()
    # Then give gracefully-stopping users up to stop_timeout to finish;
    # join() returning falsy here means some greenlets are still alive.
    if not stop_group.join(timeout=self.environment.stop_timeout):
        logger.info(
            "Not all users finished their tasks & terminated in %s seconds. Stopping them..." % self.environment.stop_timeout)
        stop_group.kill(block=True)
    logger.info("%i Users have been stopped, %g still running" % (user_count, len(self.user_greenlets)))
def wait(self):
    """Block the calling greenlet until self.q contains data.

    Registers our switch-back callable with the waiters list and yields
    to the scheduler; re-checks the queue after every wakeup in case
    another consumer drained it first.
    """
    while not self.q:
        me = getcurrent()
        self.waiting.append(me.switch)
        scheduler.switch()
def confirm(self, q):
    """Ask the main context the yes/no question `q`.

    Expected to return True or False. Records the current greenlet as the
    request context so the main context can switch the answer back here.
    """
    self.prompt = q
    self.in_confirm = True
    self.request_context = greenlet.getcurrent()
    return self.main_context.switch(q)
def switch(self):
    """Switch execution to this greenlet.

    Before switching, run the *currently running* greenlet's optional
    `switch_out` hook, if it defines one.
    """
    hook = getattr(getcurrent(), 'switch_out', None)
    if hook is not None:
        hook()
    return RawGreenlet.switch(self)
def worker():
    # Only the main greenlet is present here; record a weak reference
    # to it in the enclosing `gg` list.
    current = greenlet.getcurrent()
    gg.append(weakref.ref(current))