def testSelfConnectionUnixDgram(self):
    """Exchange datagrams between two UNIX-domain (dgram) endpoints on one scheduler.

    ``c2`` is a passive datagram endpoint; ``c1`` binds its own socket path and
    sends to ``c2``'s path.  The protocols are expected to ping-pong exactly
    ten 'A'/'B' pairs before the scheduler quits.
    """
    if not hasattr(socket, 'AF_UNIX'):
        # UNIX sockets unavailable on this platform (e.g. Windows)
        print('Skip UNIX socket test because not supported')
        return
    # Remove stale socket files from previous (possibly crashed) runs.
    # `except Exception` instead of a bare `except:` so KeyboardInterrupt /
    # SystemExit still propagate; consistent with the sibling UNIX dgram test.
    try:
        os.remove('/var/run/unixsocktestudp1.sock')
    except Exception:
        pass
    try:
        os.remove('/var/run/unixsocktestudp2.sock')
    except Exception:
        pass
    c1 = Client('dunix:/var/run/unixsocktestudp2.sock', self.protocolClient, self.scheduler,
                bindaddress = ((socket.AF_UNIX, '/var/run/unixsocktestudp1.sock'),))
    c2 = Client('pdunix:/var/run/unixsocktestudp2.sock', self.protocolServer, self.scheduler)
    r = RoutineContainer(self.scheduler, True)
    ret = bytearray()
    def mainA():
        # Record which side produced each TestDataEvent
        m = TestDataEvent.createMatcher()
        while True:
            yield (m,)
            if r.event.connection is c2:
                ret.extend(b'A')
            else:
                ret.extend(b'B')
    r.main = mainA
    r.start()
    def waitAndStart(c):
        # Delay the client start so the passive side is ready first
        for m in r.waitWithTimeout(0.5):
            yield m
        c.start()
    r.subroutine(waitAndStart(c1))
    c2.start()
    self.scheduler.main()
    self.assertEqual(ret, b'ABABABABABABABABABAB')
def testCAVerify3(self):
    """A client with no certificate must be rejected by a CA-verifying SSL server.

    The client trusts the server CA but presents no key/certificate, so the
    handshake fails: the ``notconnected`` callback fires for ``c1`` and no
    TestDataEvent is ever produced.
    """
    c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                None, None, 'testcerts/root.crt')
    c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                'testcerts/server.key', 'testcerts/server.crt', 'testcerts/root.crt')
    r = RoutineContainer(self.scheduler, True)
    ret = bytearray()
    def record_events():
        # Would record ping-pong traffic; expected to stay empty in this test
        matcher = TestDataEvent.createMatcher()
        while True:
            yield (matcher,)
            ret.extend(b'A' if r.event.connection is c2 else b'B')
    self.notconnected = False
    def on_not_connected(connection):
        # Flag the expected connection failure on the client side
        if connection is c1:
            self.notconnected = True
        # Keep this callback a generator routine without ever yielding
        if False:
            yield
    self.protocolClient.notconnected = on_not_connected
    r.main = record_events
    r.start()
    def delayed_start(client):
        # Give the passive endpoint time to start listening
        for event in r.waitWithTimeout(0.5):
            yield event
        client.start()
    r.subroutine(delayed_start(c1))
    c2.start()
    self.scheduler.main()
    self.assertTrue(self.notconnected)
    self.assertEqual(ret, b'')
def testSelfConnectionUdp(self):
    """UDP self-connection: a client and a passive UDP endpoint exchange ten round trips."""
    c1 = Client('udp://localhost:199', self.protocolClient, self.scheduler)
    c2 = Client('pudp://localhost:199', self.protocolServer, self.scheduler)
    r = RoutineContainer(self.scheduler, True)
    ret = bytearray()
    def collector():
        # Append 'A' for events from the passive side, 'B' for the client side
        data_matcher = TestDataEvent.createMatcher()
        while True:
            yield (data_matcher,)
            ret.extend(b'A' if r.event.connection is c2 else b'B')
    r.main = collector
    r.start()
    def start_later(client):
        # Start the active client only after a short grace period
        for event in r.waitWithTimeout(0.5):
            yield event
        client.start()
    r.subroutine(start_later(c1))
    c2.start()
    self.scheduler.main()
    self.assertEqual(ret, b'ABABABABABABABABABAB')
def testSelfConnectionUnixDgram(self):
    """UNIX datagram self-connection: two dgram endpoints ping-pong ten times."""
    if not hasattr(socket, 'AF_UNIX'):
        print('Skip UNIX socket test because not supported')
        return
    # Clean up socket files possibly left over by earlier runs
    for stale in ('/var/run/unixsocktestudp1.sock', '/var/run/unixsocktestudp2.sock'):
        try:
            os.remove(stale)
        except Exception:
            pass
    c1 = Client('dunix:/var/run/unixsocktestudp2.sock', self.protocolClient, self.scheduler,
                bindaddress = ((socket.AF_UNIX, '/var/run/unixsocktestudp1.sock'),))
    c2 = Client('pdunix:/var/run/unixsocktestudp2.sock', self.protocolServer, self.scheduler)
    r = RoutineContainer(self.scheduler, True)
    ret = bytearray()
    def recorder():
        matcher = TestDataEvent.createMatcher()
        while True:
            yield (matcher,)
            if r.event.connection is c2:
                ret.extend(b'A')
            else:
                ret.extend(b'B')
    r.main = recorder
    r.start()
    def deferred_start(client):
        # Let the passive side come up before connecting
        for event in r.waitWithTimeout(0.5):
            yield event
        client.start()
    r.subroutine(deferred_start(c1))
    c2.start()
    self.scheduler.main()
    self.assertEqual(ret, b'ABABABABABABABABABAB')
def _create_client(self, container):
    """Create and start a new Client connection for this instance.

    :param container: routine container whose scheduler hosts the connection
    :returns: the started Client
    :raises IOError: if this client has already been shut down
    """
    if self._shutdown:
        raise IOError('RedisClient already shutdown')
    # SSL parameters are optional attributes; default to None when absent
    key = getattr(self, 'key', None)
    certificate = getattr(self, 'certificate', None)
    ca_certs = getattr(self, 'ca_certs', None)
    client = Client(self.url, self._protocol, container.scheduler,
                    key, certificate, ca_certs)
    client.start()
    return client
def testServerClientSsl(self):
    # SSL client/server round trip: s1 listens with a CA-verified certificate,
    # c1 connects with a client certificate.  After the first data event the
    # listener is shut down, but the established connection keeps ping-ponging
    # until ten 'A'/'B' pairs have been recorded.
    c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler, 'testcerts/client.key', 'testcerts/client.crt', 'testcerts/root.crt')
    s1 = TcpServer('lssl://localhost:199', self.protocolServer, self.scheduler, 'testcerts/server.key', 'testcerts/server.crt', 'testcerts/root.crt')
    r = RoutineContainer(self.scheduler, True)
    ret = bytearray()
    def mainA():
        m = TestDataEvent.createMatcher()
        stopped = False
        while True:
            yield (m,)
            if r.event.connection is c1:
                ret.extend(b'B')
            else:
                ret.extend(b'A')
            if not stopped:
                # Stop accepting new connections after the first event.
                # NOTE(review): this loop rebinds m, the matcher used by
                # `yield (m,)` above — presumably still matching as intended;
                # verify against the scheduler's matcher semantics.
                for m in s1.shutdown():
                    yield m
                stopped = True
    r.main = mainA
    r.start()
    s1.start()
    def waitAndStart(c):
        # Delay the client start until the server is listening
        for m in r.waitWithTimeout(0.5):
            yield m
        c.start()
    r.subroutine(waitAndStart(c1))
    self.scheduler.main()
    self.assertEqual(ret, b'ABABABABABABABABABAB')
def client_connect(self, container, url, *args, **kwargs):
    '''
    Open a raw-protocol connection and wait until it is established.

    :param container: current routine container

    :param url: url to connect to (see Client)

    :param \*args, \**kwargs: extra parameters forwarded to Client
           (everything except url, protocol and scheduler)

    :returns: (connection, inputstream, outputstream) through
              ``container.retval``, where inputstream reads from the
              socket and outputstream writes to it

    :raises IOError: if the connection cannot be established
    '''
    connection = Client(url, self, container.scheduler, *args, **kwargs)
    connection.start()
    up = self.statematcher(connection, RawConnectionStateEvent.CONNECTION_UP, False)
    down = self.statematcher(connection, RawConnectionStateEvent.CONNECTION_NOTCONNECTED, False)
    yield (up, down)
    if self.event.state != RawConnectionStateEvent.CONNECTION_UP:
        raise IOError('Connection failed')
    container.retval = (connection, self.event.inputstream, self.event.outputstream)
def testMultipleClients(self):
    # Two TCP clients connect to the same listener; once both have produced
    # traffic the listener is shut down, and each client is expected to have
    # generated exactly ten data events by the time the scheduler quits.
    c1 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
    c2 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
    s1 = TcpServer('ltcp://localhost:199', self.protocolServer, self.scheduler)
    r = RoutineContainer(self.scheduler, True)
    # Per-client event counter
    counter = {c1: 0, c2: 0}
    ret = bytearray()
    def mainA():
        m = TestDataEvent.createMatcher()
        c1c = False   # c1 has produced at least one event
        c2c = False   # c2 has produced at least one event
        shutdown = False
        while True:
            yield (m, )
            counter[r.event.connection] = counter.get(
                r.event.connection, 0) + 1
            if r.event.connection is c1:
                ret.extend(b'A')
                c1c = True
            elif r.event.connection is c2:
                ret.extend(b'B')
                c2c = True
            if c1c and c2c and not shutdown:
                # Both clients seen: stop accepting new connections.
                # NOTE(review): this loop rebinds m, the matcher used by
                # `yield (m, )` above — presumably still matching as
                # intended; verify against the scheduler's matcher semantics.
                for m in s1.shutdown():
                    yield m
                shutdown = True
    r.main = mainA
    r.start()
    s1.start()
    def waitAndStart(c):
        # Delay each client start until the server is listening
        for m in r.waitWithTimeout(0.5):
            yield m
        c.start()
    r.subroutine(waitAndStart(c1))
    r.subroutine(waitAndStart(c2))
    self.scheduler.main()
    print(ret)
    self.assertEqual(counter[c1], 10)
    self.assertEqual(counter[c2], 10)
def client_connect(self, container, url, *args, **kwargs):
    """
    Create a raw-protocol client connection.

    :param container: current routine container

    :param url: url to connect to (see Client)

    :param \*args, \**kwargs: other parameters passed through to Client
           (except url, protocol and scheduler)

    :returns: (connection, inputstream, outputstream) via ``container.retval``,
              where inputstream reads from the socket and outputstream writes
              to it

    :raises IOError: when the connection attempt fails
    """
    client = Client(url, self, container.scheduler, *args, **kwargs)
    client.start()
    # Wait for either outcome of the connection attempt
    matchers = (
        self.statematcher(client, RawConnectionStateEvent.CONNECTION_UP, False),
        self.statematcher(client, RawConnectionStateEvent.CONNECTION_NOTCONNECTED, False),
    )
    yield matchers
    connected = self.event.state == RawConnectionStateEvent.CONNECTION_UP
    if connected:
        container.retval = (client, self.event.inputstream, self.event.outputstream)
    else:
        raise IOError("Connection failed")
def testSelfConnectionUdp(self):
    """UDP self-connection test: client and passive endpoint exchange ten round trips."""
    c1 = Client('udp://localhost:199', self.protocolClient, self.scheduler)
    c2 = Client('pudp://localhost:199', self.protocolServer, self.scheduler)
    r = RoutineContainer(self.scheduler, True)
    ret = bytearray()
    def trace_traffic():
        # 'A' marks traffic seen on the passive endpoint, 'B' on the client
        matcher = TestDataEvent.createMatcher()
        while True:
            yield (matcher,)
            origin = r.event.connection
            if origin is c2:
                ret.extend(b'A')
            else:
                ret.extend(b'B')
    r.main = trace_traffic
    r.start()
    def launch_after_delay(endpoint):
        # Short delay so the passive endpoint is ready before the client sends
        for ev in r.waitWithTimeout(0.5):
            yield ev
        endpoint.start()
    r.subroutine(launch_after_delay(c1))
    c2.start()
    self.scheduler.main()
    self.assertEqual(ret, b'ABABABABABABABABABAB')
def _intercept_main(self):
    '''
    Console bootstrap routine: set up the console event proxy, optionally
    accept a telnet connection for remote console access, and move the real
    scheduler loop to a background thread while the main thread runs the
    interactive console.
    '''
    # Remember the current routine so the syscall can detach it before the
    # scheduler is run in another thread
    cr = self.apiroutine.currentroutine
    self.sendEventQueue = Queue()
    _console_connect_event = threading.Event()
    _console_connect_event.clear()
    for m in self.apiroutine.waitForSend(ConsoleEvent('initproxy')):
        yield m
    if not self.startinconsole:
        # Not starting in the local console: accept a telnet connection and
        # steal its socket so console I/O can be served over the network
        p = Protocol()
        p.persist = True
        p.createqueue = False
        def init(connection):
            # Detach the accepted socket from the scheduler; the telnet server
            # thread will drive it with blocking I/O instead
            sock = connection.socket
            self.telnet_socket = sock
            self.scheduler.unregisterPolling(connection.socket)
            connection.socket = None
            connection.connected = False
            _console_connect_event.set()
            # Hold the connection routine until the telnet session finishes
            yield (SocketInjectDone.createMatcher(sock), )
        p.init = init
        p.reconnect_init = init
        Client(self.telnetconsole, p, self.scheduler, self.key, self.certificate, self.ca_certs).start()
    def syscall_threaded_main(scheduler, processor):
        # Runs as a scheduler syscall: move the scheduler loop to a daemon
        # thread and keep the main thread for the interactive console
        # Detach self
        scheduler.unregisterall(cr)
        self._threaded_main_quit = False
        def threaded_main():
            try:
                scheduler.main(False, False)
            finally:
                self._threaded_main_quit = True
                # Wake the main thread if it is waiting for a telnet connection
                _console_connect_event.set()
        t = threading.Thread(target=threaded_main)
        t.daemon = True
        t.start()
        try:
            if self.startinconsole:
                self._interactive()
            else:
                while not self._threaded_main_quit:
                    try:
                        while not _console_connect_event.is_set():
                            # There is a bug in Python 2.x that wait without timeout cannot be
                            # interrupted by signal
                            _console_connect_event.wait(3600)
                        if self._threaded_main_quit:
                            break
                    except InterruptedBySignalException:
                        # This signal should interrupt the poller, but poller is not in the main thread
                        # Send an event through the proxy will do the trick
                        self.sendEventQueue.put((InterruptPoller(), ))
                        continue
                    # A telnet client has connected: bridge stdin/stdout
                    # through pipes to the telnet server thread
                    pstdin_r, pstdin_w = os.pipe()
                    pstdout_r, pstdout_w = os.pipe()
                    orig_stdin = sys.stdin
                    orig_stdout = sys.stdout
                    orig_stderr = sys.stderr
                    try:
                        # NOTE(review): mode 'rU' and bufsize 0 on a text-mode
                        # fdopen are Python-2-only; confirm the targeted version
                        pstdin = os.fdopen(pstdin_r, 'rU', 0)
                        pstdout = os.fdopen(pstdout_w, 'w', 0)
                        sys.stdin = pstdin
                        sys.stdout = pstdout
                        sys.stderr = pstdout
                        sock = self.telnet_socket
                        sock.setblocking(True)
                        self.telnet_socket = None
                        _console_connect_event.clear()
                        t = threading.Thread(target=self._telnet_server,
                                             args=(pstdin_w, pstdout_r, sock, orig_stdout))
                        t.daemon = True
                        t.start()
                        try:
                            self._interactive()
                        except SystemExit:
                            pass
                        if not t.is_alive():
                            break
                        # Tell the routine holding the socket the session is over
                        self.sendEventQueue.put((SocketInjectDone(sock), ))
                    finally:
                        # Best-effort cleanup; always restore the std streams
                        try:
                            sock.shutdown(socket.SHUT_RDWR)
                        except:
                            pass
                        try:
                            pstdin.close()
                        except:
                            pass
                        try:
                            pstdout.close()
                        except:
                            pass
                        sys.stdin = orig_stdin
                        sys.stdout = orig_stdout
                        sys.stderr = orig_stderr
        except SystemExit:
            pass
        finally:
            self.sendEventQueue.put(None)
            scheduler.quit()
            if self.startinconsole:
                print('Wait for scheduler end, this may take some time...')
            t.join()
    for m in self.apiroutine.syscall(syscall_threaded_main, True):
        yield m
def _getconnection(self, container, host, path, https = False, forcecreate = False, cafile = None, key = None, certificate = None, timeout = None):
    '''
    Get a connection for host/path: reuse a pooled free connection when
    possible, otherwise create a new one, honouring the sameurllimit and
    samehostlimit throttles.

    Sets ``container.retvalue`` to ``(connection, created)`` where *created*
    tells whether a new connection was established.

    :raises IOError: if a new connection cannot be established
    :raises CertificateException: when host verification is impossible
    '''
    if not host:
        raise ValueError
    matcher = WebClientRequestDoneEvent.createMatcher(host, path, https)
    # Serialize requests to the same URL when sameurllimit is set
    while self.sameurllimit and (host, path, https) in self._requesting:
        self._pathwaiting.add((host, path, https))
        yield (matcher,)
    # Lock the path
    if self.sameurllimit:
        self._requesting.add((host, path, https))
    # connmap format: (free, free_ssl, workingcount)
    conns = self._connmap.setdefault(host, [[],[], 0])
    # Drop pooled connections that have been closed in the meantime
    conns[0] = [c for c in conns[0] if c.connected]
    conns[1] = [c for c in conns[1] if c.connected]
    myset = conns[1 if https else 0]
    if not forcecreate and myset:
        # There are free connections, reuse them
        conn = myset.pop()
        conn.setdaemon(False)
        container.retvalue = (conn, False)
        conns[2] += 1
        return
    matcher = WebClientRequestDoneEvent.createMatcher(host)
    # Enforce the per-host connection limit
    while self.samehostlimit and len(conns[0]) + len(conns[1]) + conns[2] >= self.samehostlimit:
        if myset:
            # Close a old connection
            conn = myset.pop()
            for m in conn.shutdown():
                yield m
        else:
            # Wait for free connections
            self._hostwaiting.add(host)
            yield (matcher,)
            # Re-fetch the pool: it may have changed while waiting
            conns = self._connmap.setdefault(host, [[],[], 0])
            myset = conns[1 if https else 0]
            if not forcecreate and myset:
                conn = myset.pop()
                conn.setdaemon(False)
                container.retvalue = (conn, False)
                conns[2] += 1
                return
    # Create new connection
    conns[2] += 1
    conn = Client(urlunsplit(('ssl' if https else 'tcp', host, '/', '', '')), self._protocol, container.scheduler, key, certificate, cafile)
    if timeout is not None:
        conn.connect_timeout = timeout
    conn.start()
    connected = self._protocol.statematcher(conn, HttpConnectionStateEvent.CLIENT_CONNECTED, False)
    notconnected = self._protocol.statematcher(conn, HttpConnectionStateEvent.CLIENT_NOTCONNECTED, False)
    yield (connected, notconnected)
    if container.matcher is notconnected:
        # Connection failed: undo the working count and shut the client down
        conns[2] -= 1
        for m in conn.shutdown(True):
            yield m
        raise IOError('Failed to connect to %r' % (conn.rawurl,))
    if https and cafile and self.verifyhost:
        try:
            # TODO: check with SSLContext
            hostcheck = re.sub(r':\d+$', '', host)
            # NOTE(review): this compares the full host (possibly with port)
            # against the remote IP to detect IP-address hosts — confirm this
            # matches the intended "no IP addresses" policy
            if host == conn.socket.remoteaddr[0]:
                # IP Address is currently now allowed
                for m in conn.shutdown(True):
                    yield m
                raise CertificateException('Cannot verify host with IP address')
            match_hostname(conn.socket.getpeercert(False), hostcheck)
        except:
            # Undo the working-count increment before re-raising
            conns[2] -= 1
            raise
    container.retvalue = (conn, True)
def _connection_manage(self):
    '''
    Main loop of the ZooKeeper client: connect to the configured servers in
    round-robin order, perform the session handshake (restoring watches on
    reconnect), then watch the established connection and rebalance or
    reconnect when it goes down.  A ZooKeeperSessionStateChanged event is
    published on every session state transition.
    '''
    try:
        failed = 0
        self._last_zxid = last_zxid = 0
        session_id = 0
        passwd = b'\x00' * 16
        last_conn_time = None
        while True:
            # Pick the next server (round-robin)
            self.currentserver = self.serverlist[self.nextptr]
            np = self.nextptr + 1
            if np >= len(self.serverlist):
                np = 0
            self.nextptr = np
            conn = Client(self.currentserver, self.protocol, self._container.scheduler,
                          self.key, self.certificate, self.ca_certs)
            self.current_connection = conn
            conn_up = ZooKeeperConnectionStateEvent.createMatcher(
                ZooKeeperConnectionStateEvent.UP, conn)
            conn_nc = ZooKeeperConnectionStateEvent.createMatcher(
                ZooKeeperConnectionStateEvent.NOTCONNECTED, conn)
            conn.start()
            try:
                yield (conn_up, conn_nc)
                if self._container.matcher is conn_nc:
                    self._logger.warning('Connect to %r failed, try next server', self.currentserver)
                    if failed > 5:
                        # Wait for a small amount of time to prevent a busy loop
                        # Socket may be rejected, it may fail very quick
                        for m in self._container.waitWithTimeout(min((failed - 5) * 0.1, 1.0)):
                            yield m
                    failed += 1
                    continue
                try:
                    # Handshake
                    set_watches = []
                    if self.session_state == ZooKeeperSessionStateChanged.DISCONNECTED:
                        # Reconnecting: collect the watches other routines want restored
                        for m in self._container.waitForSend(
                                ZooKeeperRestoreWatches(self, self.session_id, True,
                                                        restore_watches=(set(), set(), set()))):
                            yield m
                        yield (ZooKeeperRestoreWatches.createMatcher(self),)
                        data_watches, exists_watches, child_watches = \
                            self._container.event.restore_watches
                        if data_watches or exists_watches or child_watches:
                            # Pack the watches into SetWatches requests, splitting
                            # them so a single request does not grow too large
                            current_set_watches = zk.SetWatches(relativeZxid=last_zxid)
                            current_length = 0
                            for d, e, c in izip_longest(data_watches, exists_watches, child_watches):
                                if d is not None:
                                    current_set_watches.dataWatches.append(d)
                                    current_length += 4 + len(d)
                                if e is not None:
                                    current_set_watches.existWatches.append(e)
                                    current_length += 4 + len(e)
                                if c is not None:
                                    current_set_watches.childWatches.append(c)
                                    current_length += 4 + len(c)
                                if current_length > _MAX_SETWATCHES_SIZE:
                                    # Split set_watches
                                    set_watches.append(current_set_watches)
                                    current_set_watches = zk.SetWatches(relativeZxid=last_zxid)
                            if current_set_watches.dataWatches or current_set_watches.existWatches \
                                    or current_set_watches.childWatches:
                                set_watches.append(current_set_watches)
                    auth_list = list(self.auth_set)
                    # Handshake with a 10-second timeout: ConnectRequest plus any
                    # queued auth packets and watch-restore requests
                    with closing(self._container.executeWithTimeout(
                            10,
                            self.protocol.handshake(
                                conn,
                                zk.ConnectRequest(lastZxidSeen=last_zxid,
                                                  timeOut=int(self.sessiontimeout * 1000.0),
                                                  sessionId=session_id,
                                                  passwd=passwd,
                                                  readOnly=self.readonly),
                                self._container,
                                [zk.AuthPacket(scheme=a[0], auth=a[1]) for a in auth_list]
                                + set_watches))) as g:
                        for m in g:
                            yield m
                    if self._container.timeout:
                        # Handshake did not finish in time; treat as failed
                        raise IOError
                except ZooKeeperSessionExpiredException:
                    self._logger.warning('Session expired.')
                    # Session expired
                    self.session_state = ZooKeeperSessionStateChanged.EXPIRED
                    for m in self._container.waitForSend(
                            ZooKeeperSessionStateChanged(
                                ZooKeeperSessionStateChanged.EXPIRED, self, session_id)):
                        yield m
                    if self.restart_session:
                        # Start over with a brand-new session
                        failed = 0
                        last_zxid = 0
                        session_id = 0
                        passwd = b'\x00' * 16
                        last_conn_time = None
                        continue
                    else:
                        break
                except Exception:
                    self._logger.warning('Handshake failed to %r, try next server', self.currentserver)
                    if failed > 5:
                        # There is a bug ZOOKEEPER-1159 that ZooKeeper server does not respond
                        # for session expiration, but directly close the connection.
                        # This is a workaround: we store the time that we disconnected from the server,
                        # if we have exceeded the session expiration time, we declare the session is expired
                        if last_conn_time is not None and last_conn_time + self.sessiontimeout * 2 < time():
                            self._logger.warning('Session expired detected from client time.')
                            # Session expired
                            self.session_state = ZooKeeperSessionStateChanged.EXPIRED
                            for m in self._container.waitForSend(
                                    ZooKeeperSessionStateChanged(
                                        ZooKeeperSessionStateChanged.EXPIRED, self, session_id)):
                                yield m
                            if self.restart_session:
                                failed = 0
                                last_zxid = 0
                                session_id = 0
                                passwd = b'\x00' * 16
                                last_conn_time = None
                                continue
                            else:
                                break
                        else:
                            # Wait for a small amount of time to prevent a busy loop
                            for m in self._container.waitWithTimeout(min((failed - 5) * 0.1, 1.0)):
                                yield m
                    failed += 1
                else:
                    failed = 0
                    conn_resp, auth_resp = self._container.retvalue
                    if conn_resp.timeOut <= 0:
                        # Session expired
                        # Currently should not happen because handshake() should raise an exception
                        self._logger.warning('Session expired detected from handshake packet')
                        self.session_state = ZooKeeperSessionStateChanged.EXPIRED
                        for m in self._container.waitForSend(
                                ZooKeeperSessionStateChanged(
                                    ZooKeeperSessionStateChanged.EXPIRED, self, session_id)):
                            yield m
                        if self.restart_session:
                            failed = 0
                            last_zxid = 0
                            last_conn_time = None
                            session_id = 0
                            passwd = b'\x00' * 16
                            continue
                        else:
                            break
                    else:
                        # Handshake succeeded: adopt the server-assigned session
                        session_id = conn_resp.sessionId
                        passwd = conn_resp.passwd
                        # Authentication result check
                        auth_failed = any(a.err == zk.ZOO_ERR_AUTHFAILED for a in auth_resp)
                        if auth_failed:
                            self._logger.warning(
                                'ZooKeeper authentication failed for following auth: %r',
                                [a for a, r in zip(auth_list, auth_resp)
                                 if r.err == zk.ZOO_ERR_AUTHFAILED])
                            self.session_state = ZooKeeperSessionStateChanged.AUTHFAILED
                            for m in self._container.waitForSend(
                                    ZooKeeperSessionStateChanged(
                                        ZooKeeperSessionStateChanged.AUTHFAILED, self, session_id)):
                                yield m
                            # Not retrying
                            break
                        else:
                            self.session_readonly = getattr(conn_resp, 'readOnly', False)
                            self.session_id = session_id
                            # CREATED for a fresh session, RECONNECTED when resuming
                            if self.session_state == ZooKeeperSessionStateChanged.EXPIRED:
                                for m in self._container.waitForSend(
                                        ZooKeeperSessionStateChanged(
                                            ZooKeeperSessionStateChanged.CREATED, self, session_id)):
                                    yield m
                            else:
                                for m in self._container.waitForSend(
                                        ZooKeeperSessionStateChanged(
                                            ZooKeeperSessionStateChanged.RECONNECTED, self, session_id)):
                                    yield m
                            self.session_state = ZooKeeperSessionStateChanged.CREATED
                if conn.connected:
                    # Session established: wait until the connection goes down,
                    # authentication fails, or the rebalance timer fires
                    conn_down = ZooKeeperConnectionStateEvent.createMatcher(
                        ZooKeeperConnectionStateEvent.DOWN, conn, conn.connmark)
                    auth_failed = ZooKeeperResponseEvent.createMatcher(
                        zk.AUTH_XID, conn, conn.connmark,
                        _ismatch=lambda x: x.message.err == ZOO_ERR_AUTHFAILED)
                    while True:
                        rebalancetime = self.rebalancetime
                        if rebalancetime is not None:
                            # Randomize the rebalance moment to avoid a thundering herd
                            rebalancetime += random() * 60
                        for m in self._container.waitWithTimeout(rebalancetime, conn_down, auth_failed):
                            yield m
                        if self._container.timeout:
                            # Rebalance
                            if conn.zookeeper_requests:
                                # There are still requests not processed, wait longer
                                for _ in range(0, 3):
                                    longer_time = random() * 10
                                    for m in self._container.waitWithTimeout(
                                            longer_time, conn_down, auth_failed):
                                        yield m
                                    if not self._container.timeout:
                                        # Connection is down, or auth failed
                                        break
                                    if not conn.zookeeper_requests:
                                        break
                                else:
                                    # There is still requests, skip for this time
                                    continue
                            # Rebalance to a random server
                            if self._container.timeout:
                                self.nextptr = randrange(len(self.serverlist))
                        break
                    if self._container.matcher is auth_failed:
                        self._logger.warning('ZooKeeper authentication failed, shutdown the connection')
                        self.session_state = ZooKeeperSessionStateChanged.AUTHFAILED
                        for m in self._container.waitForSend(
                                ZooKeeperSessionStateChanged(
                                    ZooKeeperSessionStateChanged.AUTHFAILED, self, session_id)):
                            yield m
                        # Not retrying
                        break
                    else:
                        # Connection is down, try other servers
                        if not self._container.timeout:
                            self._logger.warning('Connection lost to %r, try next server', self.currentserver)
                        else:
                            self._logger.info('Rebalance to next server')
                        # Remember the last seen zxid and the disconnect time so the
                        # session can be resumed (or declared expired) later
                        self._last_zxid = last_zxid = conn.zookeeper_lastzxid
                        last_conn_time = time()
                        self.session_state = ZooKeeperSessionStateChanged.DISCONNECTED
                        for m in self._container.waitForSend(
                                ZooKeeperSessionStateChanged(
                                    ZooKeeperSessionStateChanged.DISCONNECTED, self, session_id)):
                            yield m
            finally:
                # Always shut the connection down asynchronously and drop the reference
                conn.subroutine(conn.shutdown(True), False)
                self.current_connection = None
    finally:
        self._shutdown = True
        if self.session_state != ZooKeeperSessionStateChanged.EXPIRED and self.session_state != ZooKeeperSessionStateChanged.AUTHFAILED:
            # Routine is exiting without a terminal state: declare the session expired
            self.session_state = ZooKeeperSessionStateChanged.EXPIRED
            self._container.scheduler.emergesend(
                ZooKeeperSessionStateChanged(
                    ZooKeeperSessionStateChanged.EXPIRED, self, session_id))
class Cleanup(ScriptModule):
    '''
    Clean up unreleased veth devices, delete unreleased logical ports.
    Comparing current logical ports with docker API result

    cleanup.py -f <configfile> [-H <endpoint>] [--skipovs] [--skipiplink] [--skiplogicalport] [--nodockerinfo]

    -H or --host: specify docker API endpoint

    --skipovs: do not remove invalid ports from OpenvSwitch

    --skipiplink: do not remove extra veth devices

    --skiplogicalport: do not remove unreleased logical ports

    --nodockerinfo: do not detect docker info, always delete logical ports
    '''
    # Command line options: (long name, short name, takes-value)
    options = (('skipovs', None, False),
               ('skipiplink', None, False),
               ('skiplogicalport', None, False),
               ('host', 'H', True))
    def run(self, host=None, skipovs=None, skipiplink=None, skiplogicalport=None):
        # Presence of the flag (any non-None value) means "skip this step"
        skipovs = (skipovs is not None)
        skipiplink = (skipiplink is not None)
        skiplogicalport = (skiplogicalport is not None)
        pool = TaskPool(self.scheduler)
        pool.start()
        # Resolve the docker endpoint the same way the docker CLI does
        if host is None:
            host = os.environ.get('DOCKER_HOST', 'unix:///var/run/docker.sock')
        enable_ssl = os.environ.get('DOCKER_TLS_VERIFY', '')
        cert_root_path = os.environ.get('DOCKER_CERT_PATH', '~/.docker')
        ca_path, cert_path, key_path = [os.path.join(cert_root_path, f)
                                        for f in ('ca.pem', 'cert.pem', 'key.pem')]
        if '/' not in host:
            # Bare host:port — prefix with a scheme based on TLS setting
            if enable_ssl:
                host = 'ssl://' + host
            else:
                host = 'tcp://' + host
        self._docker_conn = None
        http_protocol = Http(False)
        http_protocol.defaultport = 2375
        http_protocol.ssldefaultport = 2375
        http_protocol.persist = False
        def _create_docker_conn():
            # (Re)create the HTTP client to the docker API endpoint
            self._docker_conn = Client(host, http_protocol, self.scheduler,
                                       key_path, cert_path, ca_path)
            self._docker_conn.start()
            return self._docker_conn
        def call_docker_api(path, data=None, method=None):
            # Issue one docker API request; result JSON is stored in
            # self.apiroutine.retvalue.  Raises IOError when the endpoint is
            # unreachable and ValueError on a non-2xx status.
            if self._docker_conn is None or not self._docker_conn.connected:
                _create_docker_conn()
                conn_up = HttpConnectionStateEvent.createMatcher(
                    HttpConnectionStateEvent.CLIENT_CONNECTED)
                conn_noconn = HttpConnectionStateEvent.createMatcher(
                    HttpConnectionStateEvent.CLIENT_NOTCONNECTED)
                yield (conn_up, conn_noconn)
                if self.apiroutine.matcher is conn_noconn:
                    raise IOError('Cannot connect to docker API endpoint: ' + repr(host))
            if method is None:
                # Default method depends on whether a body is supplied
                if data is None:
                    method = b'GET'
                else:
                    method = b'POST'
            if data is None:
                for m in http_protocol.requestwithresponse(
                        self.apiroutine, self._docker_conn, b'docker', _bytes(path), method,
                        [(b'Accept-Encoding', b'gzip, deflate')]):
                    yield m
            else:
                for m in http_protocol.requestwithresponse(
                        self.apiroutine, self._docker_conn, b'docker', _bytes(path), method,
                        [(b'Content-Type', b'application/json;charset=utf-8'),
                         (b'Accept-Encoding', b'gzip, deflate')],
                        MemoryStream(_bytes(json.dumps(data)))):
                    yield m
            final_resp = self.apiroutine.http_finalresponse
            output_stream = final_resp.stream
            try:
                if final_resp.statuscode >= 200 and final_resp.statuscode < 300:
                    # Transparently decompress gzip/deflate response bodies
                    if output_stream is not None and b'content-encoding' in final_resp.headerdict:
                        ce = final_resp.headerdict.get(b'content-encoding')
                        if ce.lower() == b'gzip' or ce.lower() == b'x-gzip':
                            output_stream.getEncoderList().append(encoders.gzip_decoder())
                        elif ce.lower() == b'deflate':
                            output_stream.getEncoderList().append(encoders.deflate_decoder())
                    if output_stream is None:
                        self.apiroutine.retvalue = {}
                    else:
                        for m in output_stream.read(self.apiroutine):
                            yield m
                        self.apiroutine.retvalue = json.loads(self.apiroutine.data.decode('utf-8'))
                else:
                    raise ValueError('Docker API returns error status: ' + repr(final_resp.status))
            finally:
                if output_stream is not None:
                    output_stream.close(self.scheduler)
        def execute_bash(script, ignoreerror=True):
            # Run a shell script in the task pool; stdout becomes the
            # routine's retvalue, stderr is forwarded to our stderr.
            def task():
                try:
                    # NOTE(review): if Popen itself raises, `sp` is unbound in
                    # the finally clause below — confirm intended behavior
                    sp = subprocess.Popen(['bash'],
                                          stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
                    outdata, errdata = sp.communicate(script)
                    sys.stderr.write(_str(errdata))
                    errno = sp.poll()
                    if errno != 0 and not ignoreerror:
                        print('Script failed, output:\n', repr(outdata), file=sys.stderr)
                        raise ValueError('Script returns %d' % (errno, ))
                    else:
                        return _str(outdata)
                finally:
                    # Make sure the subprocess does not outlive the task
                    if sp.poll() is None:
                        try:
                            sp.terminate()
                            sleep(2)
                            if sp.poll() is None:
                                sp.kill()
                        except Exception:
                            pass
            for m in pool.runTask(self.apiroutine, task):
                yield m
        # Configurable commands/names, with docker-plugin defaults
        ovsbridge = manager.get('module.dockerplugin.ovsbridge', 'dockerbr0')
        vethprefix = manager.get('module.dockerplugin.vethprefix', 'vlcp')
        ipcommand = manager.get('module.dockerplugin.ipcommand', 'ip')
        ovscommand = manager.get('module.dockerplugin.ovscommand', 'ovs-vsctl')
        find_invalid_ovs = _find_invalid_ovs % (shell_quote(ovscommand), shell_quote(vethprefix))
        find_unused_veth = _find_unused_veth % (shell_quote(ipcommand), shell_quote(vethprefix))
        print("docker API endpoint: ", host)
        print("ovsbridge: ", ovsbridge)
        print("vethprefix: ", vethprefix)
        def invalid_ovs_ports():
            # Detect OVS ports whose veth peer is gone; only ports seen in two
            # scans 5 seconds apart are considered truly invalid, then removed.
            for m in execute_bash(find_invalid_ovs):
                yield m
            first_invalid_ovs_list = self.apiroutine.retvalue.splitlines(False)
            first_invalid_ovs_list = [k.strip() for k in first_invalid_ovs_list if k.strip()]
            if first_invalid_ovs_list:
                print("Detect %d invalid ports from OpenvSwitch, wait 5 seconds to detect again..."
                      % (len(first_invalid_ovs_list), ))
            else:
                self.apiroutine.retvalue = []
                return
            for m in self.apiroutine.waitWithTimeout(5):
                yield m
            for m in execute_bash(find_invalid_ovs):
                yield m
            second_invalid_ovs_list = self.apiroutine.retvalue.splitlines(False)
            second_invalid_ovs_list = [k.strip() for k in second_invalid_ovs_list if k.strip()]
            invalid_ports = list(set(first_invalid_ovs_list).intersection(second_invalid_ovs_list))
            if invalid_ports:
                print('Detect %d invalid ports from intersection of two tries, removing...'
                      % (len(invalid_ports), ))
                # Remove these ports
                def _remove_ports():
                    for p in invalid_ports:
                        try:
                            _unplug_ovs(ovscommand, ovsbridge, p[:-len('-tag')])
                        except Exception as exc:
                            print('Remove port %r failed: %s' % (p, exc))
                for m in pool.runTask(self.apiroutine, _remove_ports):
                    yield m
            self.apiroutine.retvalue = invalid_ports
            return
        def remove_unused_ports():
            # Detect veth devices that are no longer plugged anywhere; require
            # the same device to show up in two scans 5 seconds apart.
            for m in execute_bash(find_unused_veth):
                yield m
            first_unused_ports = self.apiroutine.retvalue.splitlines(False)
            first_unused_ports = [k.strip() for k in first_unused_ports if k.strip()]
            if first_unused_ports:
                print("Detect %d unused ports from ip-link, wait 5 seconds to detect again..."
                      % (len(first_unused_ports), ))
            else:
                self.apiroutine.retvalue = []
                return
            for m in self.apiroutine.waitWithTimeout(5):
                yield m
            for m in execute_bash(find_unused_veth):
                yield m
            second_unused_ports = self.apiroutine.retvalue.splitlines(False)
            second_unused_ports = [k.strip() for k in second_unused_ports if k.strip()]
            unused_ports = list(set(first_unused_ports).intersection(second_unused_ports))
            if unused_ports:
                print('Detect %d unused ports from intersection of two tries, removing...'
                      % (len(unused_ports), ))
                # Remove these ports
                def _remove_ports():
                    for p in unused_ports:
                        try:
                            _unplug_ovs(ovscommand, ovsbridge, p[:-len('-tag')])
                        except Exception as exc:
                            print('Remove port %r from OpenvSwitch failed: %s' % (p, exc))
                        try:
                            _delete_veth(ipcommand, p[:-len('-tag')])
                        except Exception as exc:
                            print('Delete port %r with ip-link failed: %s' % (p, exc))
                for m in pool.runTask(self.apiroutine, _remove_ports):
                    yield m
            self.apiroutine.retvalue = unused_ports
            return
        def detect_unused_logports():
            # Cross-check logical ports stored in viperflow against the ports
            # docker currently knows about; anything present only in viperflow
            # (in two checks) is considered leaked.
            # docker network ls
            print("Check logical ports from docker API...")
            for m in call_docker_api(br'/v1.24/networks?filters={"driver":["vlcp"]}'):
                yield m
            network_ports = dict((n['Id'],
                                  dict((p['EndpointID'], p['IPv4Address'])
                                       for p in n['Containers'].values()))
                                 for n in self.apiroutine.retvalue
                                 if n['Driver'] == 'vlcp')  # Old version of docker API does not support filter by driver
            print("Find %d networks and %d endpoints from docker API, recheck in 5 seconds..." % \
                  (len(network_ports), sum(len(ports) for ports in network_ports.values())))
            def recheck_ports():
                for m in self.apiroutine.waitWithTimeout(5):
                    yield m
                # docker network inspect, use this for cross check
                second_network_ports = {}
                for nid in network_ports:
                    try:
                        for m in call_docker_api(br'/networks/' + _bytes(nid)):
                            yield m
                    except ValueError as exc:
                        print('WARNING: check network failed, the network may be removed. Message: ', str(exc))
                        second_network_ports[nid] = {}
                    else:
                        second_network_ports[nid] = dict(
                            (p['EndpointID'], p['IPv4Address'])
                            for p in self.apiroutine.retvalue['Containers'].values())
                print("Recheck find %d endpoints from docker API" % \
                      (sum(len(ports) for ports in second_network_ports.values()),))
                self.apiroutine.retvalue = second_network_ports
            def check_viperflow():
                # Query viperflow twice, 5 seconds apart, and keep only ports
                # present both times (avoids racing with in-flight operations)
                first_vp_ports = {}
                for nid in network_ports:
                    for m in callAPI(self.apiroutine, 'viperflow', 'listlogicalports',
                                     {'logicalnetwork': 'docker-' + nid + '-lognet'}):
                        yield m
                    first_vp_ports[nid] = dict((p['id'], p.get('ip_address'))
                                               for p in self.apiroutine.retvalue
                                               if p['id'].startswith('docker-'))
                print("Find %d endpoints from viperflow database, recheck in 5 seconds..." % \
                      (sum(len(ports) for ports in first_vp_ports.values()),))
                for m in self.apiroutine.waitWithTimeout(5):
                    yield m
                second_vp_ports = {}
                for nid in network_ports:
                    for m in callAPI(self.apiroutine, 'viperflow', 'listlogicalports',
                                     {'logicalnetwork': 'docker-' + nid + '-lognet'}):
                        yield m
                    second_vp_ports[nid] = dict((p['id'], p.get('ip_address'))
                                                for p in self.apiroutine.retvalue
                                                if p['id'] in first_vp_ports[nid])
                print("Find %d endpoints from viperflow database from the intersection of two tries" % \
                      (sum(len(ports) for ports in second_vp_ports.values()),))
                # Strip the 'docker-' prefix to get raw endpoint IDs
                second_vp_ports = dict((nid,
                                        dict((pid[len('docker-'):], addr)
                                             for pid, addr in v.items()))
                                       for nid, v in second_vp_ports.items())
                self.apiroutine.retvalue = second_vp_ports
            for m in check_viperflow():
                yield m
            second_vp_ports = self.apiroutine.retvalue
            for m in recheck_ports():
                yield m
            second_ports = self.apiroutine.retvalue
            # Leaked = in viperflow (twice) but absent from both docker scans
            unused_logports = dict((nid, dict((pid, addr)
                                              for pid, addr in v.items()
                                              if pid not in network_ports[nid] and\
                                              pid not in second_ports[nid]))
                                   for nid, v in second_vp_ports.items())
            self.apiroutine.retvalue = unused_logports
        routines = []
        if not skipovs:
            routines.append(invalid_ovs_ports())
        if not skipiplink:
            routines.append(remove_unused_ports())
        if not skiplogicalport:
            routines.append(detect_unused_logports())
        for m in self.apiroutine.executeAll(routines):
            yield m
        if skiplogicalport:
            return
        # detect_unused_logports is always the last routine when not skipped
        (unused_logports, ) = self.apiroutine.retvalue[-1]
        if any(ports for ports in unused_logports.values()):
            print("Find %d unused logical ports, first 20 ips:\n%r" % \
                  (sum(len(ports) for ports in unused_logports.values()),
                   [v for _, v in \
                    itertools.takewhile(lambda x: x[0] <= 20,
                                        enumerate(addr for ports in unused_logports.values()
                                                  for addr in ports.values()))]))
            print("Will remove them in 5 seconds, press Ctrl+C to cancel...")
            for m in self.apiroutine.waitWithTimeout(5):
                yield m
            for ports in unused_logports.values():
                for p, addr in ports.items():
                    try:
                        for m in callAPI(self.apiroutine, 'viperflow', 'deletelogicalport',
                                         {'id': 'docker-' + p}):
                            yield m
                    except Exception as exc:
                        print("WARNING: remove logical port %r (IP: %s) failed, maybe it is already removed. Message: %s" % \
                              (p, addr, exc))
        print("Done.")
async def _getconnection(self, container, host, path, https=False, forcecreate=False, cafile=None, key=None, certificate=None, timeout=None):
    """
    Acquire an HTTP(S) client connection for *host*, reusing a pooled idle
    connection when possible, or creating and connecting a new one.

    :param container: routine container whose scheduler is used for a new connection
    :param host: "host[:port]" string the connection targets
    :param path: request path; used (with host/https) for the same-URL serialization lock
    :param https: if True, use an SSL connection (and the SSL idle pool)
    :param forcecreate: if True, never reuse a pooled connection
    :param cafile: CA file used for server certificate verification
    :param key: client private key file for SSL
    :param certificate: client certificate file for SSL
    :param timeout: optional connect timeout applied to a newly created connection
    :return: ``(connection, created)`` — *created* is True for a newly
             established connection, False for a reused pooled one
    :raise ValueError: if *host* is empty
    :raise IOError: if a new connection cannot be established
    :raise CertificateException: if hostname verification fails
    """
    if not host:
        raise ValueError
    # When sameurllimit is enabled, only one request per (host, path, https)
    # may be in flight; wait until the current one signals completion
    matcher = WebClientRequestDoneEvent.createMatcher(host, path, https)
    while self.sameurllimit and (host, path, https) in self._requesting:
        self._pathwaiting.add((host, path, https))
        await matcher
    # Lock the path
    if self.sameurllimit:
        self._requesting.add((host, path, https))
    # connmap format: (free, free_ssl, workingcount)
    conns = self._connmap.setdefault(host, [[], [], 0])
    # Drop pooled connections that were closed while idle
    conns[0] = [c for c in conns[0] if c.connected]
    conns[1] = [c for c in conns[1] if c.connected]
    myset = conns[1 if https else 0]
    if not forcecreate and myset:
        # There are free connections, reuse them
        conn = myset.pop()
        conn.setdaemon(False)
        conns[2] += 1
        return (conn, False)
    # Enforce the per-host total connection limit before creating a new one
    matcher = WebClientRequestDoneEvent.createMatcher(host)
    while self.samehostlimit and len(conns[0]) + len(conns[1]) + conns[2] >= self.samehostlimit:
        if myset:
            # Close a old connection
            conn = myset.pop()
            await conn.shutdown()
        else:
            # Wait for free connections
            self._hostwaiting.add(host)
            await matcher
            # The pool may have been rebuilt while waiting; refresh our references
            conns = self._connmap.setdefault(host, [[], [], 0])
            myset = conns[1 if https else 0]
            if not forcecreate and myset:
                conn = myset.pop()
                conn.setdaemon(False)
                conns[2] += 1
                return (conn, False)
    # Create new connection
    conns[2] += 1
    conn = Client(urlunsplit(('ssl' if https else 'tcp', host, '/', '', '')), self._protocol, container.scheduler, key, certificate, cafile)
    if timeout is not None:
        conn.connect_timeout = timeout
    conn.start()
    # Wait for either a successful or a failed connect of this client
    connected = self._protocol.statematcher(conn, HttpConnectionStateEvent.CLIENT_CONNECTED, False)
    notconnected = self._protocol.statematcher(conn, HttpConnectionStateEvent.CLIENT_NOTCONNECTED, False)
    _, m = await M_(connected, notconnected)
    if m is notconnected:
        # Connect failed: release the working-count slot and clean up
        conns[2] -= 1
        await conn.shutdown(True)
        raise IOError('Failed to connect to %r' % (conn.rawurl, ))
    if https and cafile and self.verifyhost:
        try:
            # TODO: check with SSLContext
            hostcheck = re.sub(r':\d+$', '', host)
            if host == conn.socket.remoteaddr[0]:
                # IP Address is currently not allowed
                await conn.shutdown(True)
                raise CertificateException('Cannot verify host with IP address')
            match_hostname(conn.socket.getpeercert(False), hostcheck)
        except:
            # On any verification failure (including the CertificateException
            # raised above): release the working-count slot and re-raise
            conns[2] -= 1
            raise
    return (conn, True)
def _connection_manage(self):
    """
    Main maintenance routine of the ZooKeeper client (VLCP generator style).

    Keeps a live connection to one of the servers in ``self.serverlist``
    (round-robin, with random rebalancing): performs the session handshake,
    re-registers watches after a reconnect, detects session expiration —
    both from server responses and from client-side elapsed time (a
    workaround for ZOOKEEPER-1159) — and handles authentication failures.
    Listeners are notified through ZooKeeperSessionStateChanged events.

    Runs until the session expires (and ``restart_session`` is False) or
    authentication fails; on exit it marks the client as shut down.
    """
    try:
        failed = 0                      # consecutive connect/handshake failures
        self._last_zxid = last_zxid = 0
        session_id = 0
        passwd = b'\x00' * 16           # blank password for a brand-new session
        last_conn_time = None           # time of last disconnect, for client-side expiration detection
        while True:
            # Round-robin over the server list
            self.currentserver = self.serverlist[self.nextptr]
            np = self.nextptr + 1
            if np >= len(self.serverlist):
                np = 0
            self.nextptr = np
            conn = Client(self.currentserver, self.protocol, self._container.scheduler, self.key, self.certificate, self.ca_certs)
            self.current_connection = conn
            conn_up = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.UP, conn)
            conn_nc = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.NOTCONNECTED, conn)
            conn.start()
            try:
                # Wait until the TCP connection either comes up or fails
                yield (conn_up, conn_nc)
                if self._container.matcher is conn_nc:
                    self._logger.warning('Connect to %r failed, try next server', self.currentserver)
                    if failed > 5:
                        # Wait for a small amount of time to prevent a busy loop
                        # Socket may be rejected, it may fail very quick
                        for m in self._container.waitWithTimeout(min((failed - 5) * 0.1, 1.0)):
                            yield m
                    failed += 1
                    continue
                try:
                    # Handshake
                    set_watches = []
                    if self.session_state == ZooKeeperSessionStateChanged.DISCONNECTED:
                        # Collect watches registered before the disconnect so they can
                        # be restored on the new connection via SetWatches packets
                        for m in self._container.waitForSend(ZooKeeperRestoreWatches(self, self.session_id, True, restore_watches = (set(), set(), set()))):
                            yield m
                        yield (ZooKeeperRestoreWatches.createMatcher(self),)
                        data_watches, exists_watches, child_watches = \
                            self._container.event.restore_watches
                        if data_watches or exists_watches or child_watches:
                            current_set_watches = zk.SetWatches(relativeZxid = last_zxid)
                            current_length = 0
                            for d, e, c in izip_longest(data_watches, exists_watches, child_watches):
                                if d is not None:
                                    current_set_watches.dataWatches.append(d)
                                    current_length += 4 + len(d)
                                if e is not None:
                                    current_set_watches.existWatches.append(e)
                                    current_length += 4 + len(e)
                                if c is not None:
                                    current_set_watches.childWatches.append(c)
                                    current_length += 4 + len(c)
                                if current_length > _MAX_SETWATCHES_SIZE:
                                    # Split set_watches
                                    set_watches.append(current_set_watches)
                                    current_set_watches = zk.SetWatches(relativeZxid = last_zxid)
                            if current_set_watches.dataWatches or current_set_watches.existWatches \
                                    or current_set_watches.childWatches:
                                set_watches.append(current_set_watches)
                    auth_list = list(self.auth_set)
                    # Handshake (ConnectRequest + auth + watch restore) with a 10s limit
                    with closing(self._container.executeWithTimeout(10,
                                    self.protocol.handshake(conn,
                                        zk.ConnectRequest(lastZxidSeen = last_zxid,
                                                          timeOut = int(self.sessiontimeout * 1000.0),
                                                          sessionId = session_id,
                                                          passwd = passwd,
                                                          readOnly = self.readonly),
                                        self._container,
                                        [zk.AuthPacket(scheme = a[0], auth = a[1]) for a in auth_list] + set_watches))) as g:
                        for m in g:
                            yield m
                    if self._container.timeout:
                        raise IOError
                except ZooKeeperSessionExpiredException:
                    self._logger.warning('Session expired.')
                    # Session expired
                    self.session_state = ZooKeeperSessionStateChanged.EXPIRED
                    for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                ZooKeeperSessionStateChanged.EXPIRED, self, session_id)):
                        yield m
                    if self.restart_session:
                        # Start over with a completely fresh session
                        failed = 0
                        last_zxid = 0
                        session_id = 0
                        passwd = b'\x00' * 16
                        last_conn_time = None
                        continue
                    else:
                        break
                except Exception:
                    self._logger.warning('Handshake failed to %r, try next server', self.currentserver)
                    if failed > 5:
                        # There is a bug ZOOKEEPER-1159 that ZooKeeper server does not respond
                        # for session expiration, but directly close the connection.
                        # This is a workaround: we store the time that we disconnected from the server,
                        # if we have exceeded the session expiration time, we declare the session is expired
                        if last_conn_time is not None and last_conn_time + self.sessiontimeout * 2 < time():
                            self._logger.warning('Session expired detected from client time.')
                            # Session expired
                            self.session_state = ZooKeeperSessionStateChanged.EXPIRED
                            for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                        ZooKeeperSessionStateChanged.EXPIRED, self, session_id)):
                                yield m
                            if self.restart_session:
                                failed = 0
                                last_zxid = 0
                                session_id = 0
                                passwd = b'\x00' * 16
                                last_conn_time = None
                                continue
                            else:
                                break
                        else:
                            # Wait for a small amount of time to prevent a busy loop
                            for m in self._container.waitWithTimeout(min((failed - 5) * 0.1, 1.0)):
                                yield m
                    failed += 1
                else:
                    # Handshake succeeded
                    failed = 0
                    conn_resp, auth_resp = self._container.retvalue
                    if conn_resp.timeOut <= 0:
                        # Session expired
                        # Currently should not happen because handshake() should raise an exception
                        self._logger.warning('Session expired detected from handshake packet')
                        self.session_state = ZooKeeperSessionStateChanged.EXPIRED
                        for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                    ZooKeeperSessionStateChanged.EXPIRED, self, session_id)):
                            yield m
                        if self.restart_session:
                            failed = 0
                            last_zxid = 0
                            last_conn_time = None
                            session_id = 0
                            passwd = b'\x00' * 16
                            continue
                        else:
                            break
                    else:
                        session_id = conn_resp.sessionId
                        passwd = conn_resp.passwd
                        # Authentication result check
                        auth_failed = any(a.err == zk.ZOO_ERR_AUTHFAILED for a in auth_resp)
                        if auth_failed:
                            self._logger.warning('ZooKeeper authentication failed for following auth: %r',
                                                 [a for a,r in zip(auth_list, auth_resp) if r.err == zk.ZOO_ERR_AUTHFAILED])
                            self.session_state = ZooKeeperSessionStateChanged.AUTHFAILED
                            for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                        ZooKeeperSessionStateChanged.AUTHFAILED, self, session_id
                                        )):
                                yield m
                            # Not retrying
                            break
                        else:
                            self.session_readonly = getattr(conn_resp, 'readOnly', False)
                            self.session_id = session_id
                            # Announce the (re)established session: CREATED for a brand-new
                            # session after expiration, RECONNECTED for a restored one
                            if self.session_state == ZooKeeperSessionStateChanged.EXPIRED:
                                for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                            ZooKeeperSessionStateChanged.CREATED, self, session_id
                                            )):
                                    yield m
                            else:
                                for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                            ZooKeeperSessionStateChanged.RECONNECTED, self, session_id
                                            )):
                                    yield m
                            self.session_state = ZooKeeperSessionStateChanged.CREATED
                if conn.connected:
                    conn_down = ZooKeeperConnectionStateEvent.createMatcher(ZooKeeperConnectionStateEvent.DOWN, conn, conn.connmark
                                                                            )
                    auth_failed = ZooKeeperResponseEvent.createMatcher(zk.AUTH_XID, conn, conn.connmark,
                                                                       _ismatch = lambda x: x.message.err == ZOO_ERR_AUTHFAILED)
                    while True:
                        # Wait for a disconnect / auth failure, or a rebalance timeout
                        rebalancetime = self.rebalancetime
                        if rebalancetime is not None:
                            # Randomize the rebalance time to avoid a thundering herd
                            rebalancetime += random() * 60
                        for m in self._container.waitWithTimeout(rebalancetime, conn_down, auth_failed):
                            yield m
                        if self._container.timeout:
                            # Rebalance
                            if conn.zookeeper_requests:
                                # There are still requests not processed, wait longer
                                for _ in range(0, 3):
                                    longer_time = random() * 10
                                    for m in self._container.waitWithTimeout(longer_time, conn_down, auth_failed):
                                        yield m
                                    if not self._container.timeout:
                                        # Connection is down, or auth failed
                                        break
                                    if not conn.zookeeper_requests:
                                        break
                                else:
                                    # There is still requests, skip for this time
                                    continue
                            # Rebalance to a random server
                            if self._container.timeout:
                                self.nextptr = randrange(len(self.serverlist))
                        break
                    if self._container.matcher is auth_failed:
                        self._logger.warning('ZooKeeper authentication failed, shutdown the connection')
                        self.session_state = ZooKeeperSessionStateChanged.AUTHFAILED
                        for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                    ZooKeeperSessionStateChanged.AUTHFAILED, self, session_id
                                    )):
                            yield m
                        # Not retrying
                        break
                    else:
                        # Connection is down, try other servers
                        if not self._container.timeout:
                            self._logger.warning('Connection lost to %r, try next server', self.currentserver)
                        else:
                            self._logger.info('Rebalance to next server')
                        self._last_zxid = last_zxid = conn.zookeeper_lastzxid
                        last_conn_time = time()
                        self.session_state = ZooKeeperSessionStateChanged.DISCONNECTED
                        for m in self._container.waitForSend(ZooKeeperSessionStateChanged(
                                    ZooKeeperSessionStateChanged.DISCONNECTED, self, session_id
                                    )):
                            yield m
            finally:
                # Always shut the connection down in the background and clear the reference
                conn.subroutine(conn.shutdown(True), False)
                self.current_connection = None
    finally:
        # Routine is exiting: mark shutdown and make sure listeners see a terminal state
        self._shutdown = True
        if self.session_state != ZooKeeperSessionStateChanged.EXPIRED and self.session_state != ZooKeeperSessionStateChanged.AUTHFAILED:
            self.session_state = ZooKeeperSessionStateChanged.EXPIRED
            self._container.scheduler.emergesend(ZooKeeperSessionStateChanged(
                    ZooKeeperSessionStateChanged.EXPIRED, self, session_id
                    ))
def _create_docker_conn(): self._docker_conn = Client(host, http_protocol, self.scheduler, key_path, cert_path, ca_path) self._docker_conn.start() return self._docker_conn