def test_server_on_unix_socket(self):
    """Exercise the backdoor server end-to-end over an AF_UNIX socket."""
    SOCKET_PATH = '/tmp/eventlet_backdoor_test.socket'
    # Remove a stale socket file left behind by a previous run.
    if os.path.exists(SOCKET_PATH):
        os.unlink(SOCKET_PATH)
    listener = socket.socket(socket.AF_UNIX)
    listener.bind(SOCKET_PATH)
    listener.listen(5)
    serv = eventlet.spawn(backdoor.backdoor_server, listener)
    client = socket.socket(socket.AF_UNIX)
    client.connect(SOCKET_PATH)
    f = client.makefile('rw')
    # The banner should look like a normal interactive interpreter session.
    # (assert_/assertEquals are deprecated aliases removed in Python 3.12;
    # use assertIn/assertEqual instead.)
    self.assertIn('Python', f.readline())
    f.readline()  # build info
    f.readline()  # help info
    self.assertIn('InteractiveConsole', f.readline())
    self.assertEqual('>>> ', f.read(4))
    f.write('print("hi")\n')
    f.flush()
    self.assertEqual('hi\n', f.readline())
    self.assertEqual('>>> ', f.read(4))
    f.write('exit()\n')
    f.close()
    client.close()
    serv.kill()
    # wait for the console to discover that it's dead
    eventlet.sleep(0.1)
def run(self):
    """Accept TCP connections on ``self.listen_port`` and proxy each one
    bidirectionally to ``(self.target_host, self.target_port)``.

    Runs forever; one greenthread is spawned per transfer direction.
    """
    try:
        listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listen_sock.bind(('0.0.0.0', self.listen_port))
        listen_sock.listen(50)
    except socket.error as e:
        # Previously this swallowed the error silently; report why we
        # could not listen before exiting.
        sys.stderr.write(
            "failed to listen on port %d: %s\n" % (self.listen_port, e))
        sys.exit(-1)

    def forward(source, target, cb=lambda: None):
        # Pump bytes from source to target until EOF, then close both ends.
        while True:
            data = source.recv(32384)
            if data:
                target.sendall(data)
            else:
                source.close()
                target.close()
                break

    print("listening on port %d" % self.listen_port)
    while True:
        income_sock, address = listen_sock.accept()
        target_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        target_sock.connect((self.target_host, self.target_port))
        # One greenthread per direction of the proxied connection.
        eventlet.spawn(forward, income_sock, target_sock)
        eventlet.spawn(forward, target_sock, income_sock)
    # NOTE(review): unreachable — the accept loop above never exits;
    # kept for documentation of intended shutdown behavior.
    listen_sock.shutdown(socket.SHUT_RDWR)
    listen_sock.close()
def test_recv_into_timeout(self):
    """recv_into() on a socket with a 0.1s timeout must raise socket.timeout."""
    buf = array.array('B')
    listener = greenio.GreenSocket(socket.socket())
    listener.bind(('', 0))
    listener.listen(50)
    evt = event.Event()

    def server():
        # accept the connection in another greenlet
        sock, addr = listener.accept()
        evt.wait()

    gt = eventlet.spawn(server)
    addr = listener.getsockname()
    client = greenio.GreenSocket(socket.socket())
    client.settimeout(0.1)
    client.connect(addr)
    # Consistent with the sibling timeout tests: delegate the raise/args
    # verification to the shared expect_socket_timeout helper instead of
    # open-coding try/except/fail.
    expect_socket_timeout(client.recv_into, buf)
    evt.send()
    gt.wait()
def test_sendall_timeout(self):
    """A sendall() that overruns the OS buffer must hit the 0.1s timeout."""
    listener = greenio.GreenSocket(socket.socket())
    listener.bind(('', 0))
    listener.listen(50)
    evt = event.Event()

    def server():
        # Accept in a separate greenlet, then park until the client is done.
        peer, _ = listener.accept()
        evt.wait()

    accept_thread = eventlet.spawn(server)
    client = greenio.GreenSocket(socket.socket())
    client.settimeout(0.1)
    client.connect(listener.getsockname())
    # 8 MiB comfortably exceeds any default socket buffer, so sendall blocks.
    payload = b"A" * (8 << 20)
    expect_socket_timeout(client.sendall, payload)
    evt.send()
    accept_thread.wait()
def test_recv_into_timeout(self):
    """recv_into() on an idle connection must raise socket.timeout."""
    scratch = array.array('B')
    listener = greenio.GreenSocket(socket.socket())
    listener.bind(('', 0))
    listener.listen(50)
    evt = event.Event()

    def server():
        # Accept the connection in another greenlet; never send anything.
        peer, _ = listener.accept()
        evt.wait()

    accept_thread = eventlet.spawn(server)
    client = greenio.GreenSocket(socket.socket())
    client.settimeout(0.1)
    client.connect(listener.getsockname())
    expect_socket_timeout(client.recv_into, scratch)
    evt.send()
    accept_thread.wait()
def test_sendall_timeout(self):
    """sendall() past the OS buffer size must raise socket.timeout."""
    listener = greenio.GreenSocket(socket.socket())
    listener.bind(('', 0))
    listener.listen(50)
    evt = event.Event()

    def server():
        # accept the connection in another greenlet
        sock, addr = listener.accept()
        evt.wait()

    gt = eventlet.spawn(server)
    addr = listener.getsockname()
    client = greenio.GreenSocket(socket.socket())
    client.settimeout(0.1)
    client.connect(addr)
    # want to exceed the size of the OS buffer so it'll block
    msg = b"A" * (8 << 20)
    # Consistent with the sibling timeout tests: use the shared
    # expect_socket_timeout helper instead of open-coding try/except/fail.
    expect_socket_timeout(client.sendall, msg)
    evt.send()
    gt.wait()
def test_sendall_impl(many_bytes):
    """Push *many_bytes* of 'x' (followed by a second_bytes chaser of 'y')
    through deliberately tiny socket buffers and drain both on the client.

    ``second_bytes`` is a free name — presumably a module-level constant;
    TODO confirm against the enclosing module.
    """
    bufsize = max(many_bytes // 15, 2)

    def sender(listener):
        (sock, addr) = listener.accept()
        sock = bufsized(sock, size=bufsize)
        sock.sendall(b'x' * many_bytes)
        sock.sendall(b'y' * second_bytes)

    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(("", 0))
    listener.listen(50)
    sender_coro = eventlet.spawn(sender, listener)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', listener.getsockname()[1]))
    bufsized(client, size=bufsize)
    total = 0
    while total < many_bytes:
        data = client.recv(min(many_bytes - total, many_bytes // 10))
        if not data:
            break
        total += len(data)
    total2 = 0
    # BUG FIX: this loop previously tested `total < second_bytes`, so after
    # the first loop had received many_bytes it never executed and the
    # second payload was never drained.
    while total2 < second_bytes:
        data = client.recv(second_bytes)
        if not data:
            break
        total2 += len(data)
    sender_coro.wait()
    client.close()
def test_del_closes_socket(self):
    """Dropping every reference except the makefile() wrapper and closing
    that wrapper must close the underlying socket too."""
    def accept_once(listener):
        # Replace the accepted connection with its makefile() wrapper;
        # after close + a gc pass, writes must fail.
        try:
            conn, _ = listener.accept()
            conn = conn.makefile('w')
            conn.write(b'hello\n')
            conn.close()
            gc.collect()
            self.assertWriteToClosedFileRaises(conn)
        finally:
            listener.close()

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('127.0.0.1', 0))
    server.listen(50)
    killer = eventlet.spawn(accept_once, server)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', server.getsockname()[1]))
    reader = client.makefile()
    client.close()
    # The greeting arrives in full, then EOF once the server side is gone.
    assert reader.read() == b'hello\n'
    assert reader.read() == b''
    killer.wait()
def __init__(self, username, password, gw, hostname=""):
    """Set up a SIP-style UDP endpoint.

    If a hostname is specified, use it, otherwise try to get the hostname
    from the module-global HOSTNAME.  Still nothing?  Open a throwaway UDP
    socket towards the internet and use whatever local address the OS
    picked for the route.

    BUG FIX: the original text was not valid Python (`if (!hostname)`,
    missing colons) and ended with an unconditional ``raise CallError``
    that made the constructor always fail; both have been repaired.
    """
    global HOSTNAME
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    self.sock.bind(('0.0.0.0', 0))
    self.un = username
    self.pw = password
    self.gw = gw
    self.pkt = []
    if not hostname:
        if not HOSTNAME:
            # The socket is never used to send; connect() merely asks the
            # OS which local address routes toward the internet.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(('google.com', 0))
            HOSTNAME = s.getsockname()[0]
        self.hn = HOSTNAME
    else:
        self.hn = hostname
    self.seq = seqNo()
    self.callId = "%s-%d" % (randStr(), seqNo())
    self.tag = "%s-%d" % (randStr(), seqNo())
    self.branch = "%s-%d" % (randStr(), seqNo())
    self.port = self.sock.getsockname()[1]
    eventlet.greenthread.spawn(self.handleIncomming)
def test_timeout(self, socket=socket):
    """An accepted socket's recv() must raise socket.timeout('timed out')
    once its settimeout() deadline elapses."""
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(('127.0.0.1', 0))
    server.listen(1)
    client = socket.socket()
    client.connect(('127.0.0.1', server.getsockname()[1]))
    accepted, _ = server.accept()
    accepted.settimeout(1)
    try:
        try:
            accepted.recv(1024)
        except socket.timeout as ex:
            # The exception carries exactly one positional argument.
            assert hasattr(ex, 'args')
            assert len(ex.args) == 1
            assert ex.args[0] == 'timed out'
        else:
            self.fail("Should have timed out")
    finally:
        client.close()
        accepted.close()
        server.close()
def testDefaultTimeout(self):
    """setdefaulttimeout() must propagate to new sockets and reject
    invalid values."""
    # Fresh processes start with no default timeout.
    self.assertEqual(socket.getdefaulttimeout(), None)
    s = socket.socket()
    self.assertEqual(s.gettimeout(), None)
    s.close()

    # Apply each default in turn and confirm new sockets inherit it;
    # ends by restoring None for subsequent tests.
    for default in (10, None):
        socket.setdefaulttimeout(default)
        self.assertEqual(socket.getdefaulttimeout(), default)
        s = socket.socket()
        self.assertEqual(s.gettimeout(), default)
        s.close()

    # Negative numbers and non-numbers are rejected outright.
    self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
    self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
def test_close_with_makefile(self):
    """Verify that a socket and the file object returned by its makefile()
    have independent lifetimes, whichever one is closed first.

    NOTE(review): str writes and socket._fileobject behavior suggest this
    targets Python 2 — confirm before porting.
    """

    def accept_close_early(listener):
        # verify that the makefile and the socket are truly independent
        # by closing the socket prior to using the made file
        try:
            conn, addr = listener.accept()
            fd = conn.makefile()
            conn.close()
            fd.write('hello\n')
            fd.close()
            # socket._fileobjects are odd: writes don't check
            # whether the socket is closed or not, and you get an
            # AttributeError during flush if it is closed
            fd.write('a')
            self.assertRaises(Exception, fd.flush)
            self.assertRaises(socket.error, conn.send, 'b')
        finally:
            listener.close()

    def accept_close_late(listener):
        # verify that the makefile and the socket are truly independent
        # by closing the made file and then sending a character
        try:
            conn, addr = listener.accept()
            fd = conn.makefile()
            fd.write('hello')
            fd.close()
            conn.send('\n')
            conn.close()
            fd.write('a')
            self.assertRaises(Exception, fd.flush)
            self.assertRaises(socket.error, conn.send, 'b')
        finally:
            listener.close()

    def did_it_work(server):
        # Client side: read the full greeting, then expect a clean EOF.
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', server.getsockname()[1]))
        fd = client.makefile()
        client.close()
        assert fd.readline() == 'hello\n'
        assert fd.read() == ''
        fd.close()

    # Run the client once against each server-side close ordering.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('0.0.0.0', 0))
    server.listen(50)
    killer = eventlet.spawn(accept_close_early, server)
    did_it_work(server)
    killer.wait()

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('0.0.0.0', 0))
    server.listen(50)
    killer = eventlet.spawn(accept_close_late, server)
    did_it_work(server)
    killer.wait()
def test_server(self):
    """Spin up a backdoor server on a loopback TCP socket and run the
    shared client/server conversation against it."""
    listener = socket.socket()
    listener.bind(('localhost', 0))
    listener.listen(50)
    server_thread = eventlet.spawn(backdoor.backdoor_server, listener)
    client = socket.socket()
    client.connect(('localhost', listener.getsockname()[1]))
    self._run_test_on_client_and_server(client, server_thread)
def test_default_nonblocking(self):
    """Green sockets are created non-blocking, and wrapping an existing
    green fd preserves that flag by default."""
    first = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    assert fcntl.fcntl(first.fd.fileno(), fcntl.F_GETFL) & os.O_NONBLOCK
    # Re-wrap the underlying fd; O_NONBLOCK must still be set.
    second = socket.socket(first.fd)
    assert fcntl.fcntl(second.fd.fileno(), fcntl.F_GETFL) & os.O_NONBLOCK
def test_server_on_ipv6_socket(self):
    """The backdoor server must also work over an IPv6 listener."""
    listener = socket.socket(socket.AF_INET6)
    listener.bind(('::', 0))
    listener.listen(5)
    server_thread = eventlet.spawn(backdoor.backdoor_server, listener)
    client = socket.socket(socket.AF_INET6)
    client.connect(listener.getsockname())
    self._run_test_on_client_and_server(client, server_thread)
def testLinuxAbstractNamespace(self):
    """AF_UNIX sockets in the Linux abstract namespace (leading NUL byte)
    report the abstract address via getsockname()/getpeername()."""
    address = "\x00python-test-hello\x00\xff"
    server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    server.bind(address)
    server.listen(1)
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(server.getsockname())
    server.accept()
    self.assertEqual(server.getsockname(), address)
    self.assertEqual(client.getpeername(), address)
def test_skip_nonblocking(self):
    """set_nonblocking=False must leave a deliberately-blocking fd alone."""
    donor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = donor.fd.fileno()
    # Clear O_NONBLOCK on the underlying descriptor first.
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    flags = fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
    assert flags & os.O_NONBLOCK == 0
    # Wrapping with set_nonblocking=False must not flip the flag back on.
    wrapped = socket.socket(donor.fd, set_nonblocking=False)
    assert fcntl.fcntl(wrapped.fd.fileno(), fcntl.F_GETFL) & os.O_NONBLOCK == 0
def test_multiple_readers(self):
    """Two greenlets may share one fd for reading once the multiple-readers
    guard is disabled; each must receive at least some of the data."""
    debug.hub_prevent_multiple_readers(False)
    recvsize = 2 * min_buf_size()
    sendsize = 10 * recvsize

    # test that we can have multiple coroutines reading
    # from the same fd. We make no guarantees about which one gets which
    # bytes, but they should both get at least some
    def reader(sock, results):
        while True:
            data = sock.recv(recvsize)
            if not data:
                break
            results.append(data)

    results1 = []
    results2 = []
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('127.0.0.1', 0))
    listener.listen(50)

    def server():
        (sock, addr) = listener.accept()
        sock = bufsized(sock)
        try:
            c1 = eventlet.spawn(reader, sock, results1)
            c2 = eventlet.spawn(reader, sock, results2)
            try:
                c1.wait()
                c2.wait()
            finally:
                # Make sure neither reader outlives the test.
                c1.kill()
                c2.kill()
        finally:
            sock.close()

    server_coro = eventlet.spawn(server)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', listener.getsockname()[1]))
    bufsized(client, size=sendsize)
    # Split into multiple chunks so that we can wait a little
    # every iteration which allows both readers to queue and
    # recv some data when we actually send it.
    for i in range(20):
        eventlet.sleep(0.001)
        client.sendall(b'*' * (sendsize // 20))
    client.close()
    server_coro.wait()
    listener.close()
    assert len(results1) > 0
    assert len(results2) > 0
    # Restore the default guard for subsequent tests.
    debug.hub_prevent_multiple_readers()
def test_close_with_makefile(self):
    """Verify that a socket and the file object returned by its makefile()
    have independent lifetimes, whichever one is closed first (bytes
    variant of the test)."""

    def accept_close_early(listener):
        # verify that the makefile and the socket are truly independent
        # by closing the socket prior to using the made file
        try:
            conn, addr = listener.accept()
            fd = conn.makefile('w')
            conn.close()
            fd.write(b'hello\n')
            fd.close()
            self.assertWriteToClosedFileRaises(fd)
            self.assertRaises(socket.error, conn.send, b'b')
        finally:
            listener.close()

    def accept_close_late(listener):
        # verify that the makefile and the socket are truly independent
        # by closing the made file and then sending a character
        try:
            conn, addr = listener.accept()
            fd = conn.makefile('w')
            fd.write(b'hello')
            fd.close()
            conn.send(b'\n')
            conn.close()
            self.assertWriteToClosedFileRaises(fd)
            self.assertRaises(socket.error, conn.send, b'b')
        finally:
            listener.close()

    def did_it_work(server):
        # Client side: read the full greeting, then expect a clean EOF.
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', server.getsockname()[1]))
        fd = client.makefile()
        client.close()
        assert fd.readline() == b'hello\n'
        assert fd.read() == b''
        fd.close()

    # Exercise the client against each server-side close ordering.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('0.0.0.0', 0))
    server.listen(50)
    killer = eventlet.spawn(accept_close_early, server)
    did_it_work(server)
    killer.wait()

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('0.0.0.0', 0))
    server.listen(50)
    killer = eventlet.spawn(accept_close_late, server)
    did_it_work(server)
    killer.wait()
def test_server_on_unix_socket(self):
    """Run the shared backdoor conversation over an AF_UNIX socket."""
    SOCKET_PATH = '/tmp/eventlet_backdoor_test.socket'
    # Clean up any socket file left behind by an earlier run.
    if os.path.exists(SOCKET_PATH):
        os.unlink(SOCKET_PATH)
    listener = socket.socket(socket.AF_UNIX)
    listener.bind(SOCKET_PATH)
    listener.listen(5)
    server_thread = eventlet.spawn(backdoor.backdoor_server, listener)
    client = socket.socket(socket.AF_UNIX)
    client.connect(SOCKET_PATH)
    self._run_test_on_client_and_server(client, server_thread)
def test_multiple_readers(self, clibufsize=False):
    """Two greenlets may share one fd for reading once the multiple-readers
    guard is disabled; each must receive at least some of the data.

    clibufsize: when True, size the client's buffers to the full send size
    instead of the default.
    """
    debug.hub_prevent_multiple_readers(False)
    recvsize = 2 * min_buf_size()
    sendsize = 10 * recvsize

    # test that we can have multiple coroutines reading
    # from the same fd. We make no guarantees about which one gets which
    # bytes, but they should both get at least some
    def reader(sock, results):
        while True:
            data = sock.recv(recvsize)
            if not data:
                break
            results.append(data)

    results1 = []
    results2 = []
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('127.0.0.1', 0))
    listener.listen(50)

    def server():
        (sock, addr) = listener.accept()
        sock = bufsized(sock)
        try:
            c1 = eventlet.spawn(reader, sock, results1)
            c2 = eventlet.spawn(reader, sock, results2)
            try:
                c1.wait()
                c2.wait()
            finally:
                # Make sure neither reader outlives the test.
                c1.kill()
                c2.kill()
        finally:
            sock.close()

    server_coro = eventlet.spawn(server)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', listener.getsockname()[1]))
    if clibufsize:
        bufsized(client, size=sendsize)
    else:
        bufsized(client)
    client.sendall(b'*' * sendsize)
    client.close()
    server_coro.wait()
    listener.close()
    assert len(results1) > 0
    assert len(results2) > 0
    # Restore the default guard for subsequent tests.
    debug.hub_prevent_multiple_readers()
def test_timeout():
    """Connect to a known TLS POP3 service with a generous timeout and
    treat timeouts/refusals as environment warnings, not failures.

    NOTE(review): Python 2 syntax (print statement, ``except E, e``) —
    this block belongs to a py2-only module.
    """
    test_support.requires('network')

    def error_msg(extra_msg):
        print >> sys.stderr, """\
WARNING: an attempt to connect to %r %s, in test_timeout. That may be legitimate, but is not the outcome we hoped for. If this message is seen often, test_timeout should be changed to use a more reliable address.""" % (ADDR, extra_msg)

    if test_support.verbose:
        print "test_timeout ..."

    # A service which issues a welcome banner (without need to write
    # anything).
    ADDR = "pop.gmail.com", 995

    s = socket.socket()
    s.settimeout(30.0)
    try:
        s.connect(ADDR)
    except socket.timeout:
        error_msg('timed out')
        return
    except socket.error, exc:  # In case connection is refused.
        if exc.args[0] == errno.ECONNREFUSED:
            error_msg('was refused')
            return
        else:
            raise
def test_connection_refused(self): # open and close a dummy server to find an unused port server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('127.0.0.1', 0)) server.listen(1) port = server.getsockname()[1] server.close() del server s = socket.socket() try: s.connect(('127.0.0.1', port)) self.fail("Shouldn't have connected") except socket.error as ex: code, text = ex.args assert code in [111, 61, 10061], (code, text) assert 'refused' in text.lower(), (code, text)
def test_send_timeout(self):
    """With a microscopic timeout and tiny buffers, repeated send() calls
    must eventually raise socket.timeout('timed out')."""
    self.reset_timeout(2)
    listener = bufsized(eventlet.listen(('', 0)))
    evt = event.Event()

    def server():
        # Accept in another greenlet and park without ever reading,
        # so the client's send buffer fills up.
        peer, _ = listener.accept()
        peer = bufsized(peer)
        evt.wait()

    accept_thread = eventlet.spawn(server)
    client = bufsized(greenio.GreenSocket(socket.socket()))
    client.connect(listener.getsockname())
    try:
        client.settimeout(0.00001)
        payload = b"A" * 100000  # large enough number to overwhelm most buffers
        sent = 0
        # want to exceed the size of the OS buffer so it'll block in a
        # single send
        for _ in range(10):
            sent += client.send(payload)
        self.fail("socket.timeout not raised")
    except socket.timeout as e:
        assert hasattr(e, 'args')
        self.assertEqual(e.args[0], 'timed out')
    evt.send()
    accept_thread.wait()
def run(self):
    """Main UDP receive loop: bind to the configured address and feed
    every datagram (or '#'-separated batch of metrics) to decode_recvd().

    NOTE(review): Python 2 print statements — this module is py2-only.
    """
    # Flush accumulated stats periodically on a background green thread.
    eventlet.spawn_n(self.stats_flush)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    addr = (self.listen_addr, self.listen_port)
    sock.bind(addr)
    buf = 8192
    self.logger.info("Listening on %s:%d" % addr)
    if self.debug:
        print "Listening on %s:%d" % addr
    if self.combined_events:
        if self.debug:
            print "combined_events mode enabled"
        # Batched mode: one datagram may carry several '#'-joined metrics.
        while 1:
            data, addr = sock.recvfrom(buf)
            if not data:
                break
            else:
                for metric in data.split("#"):
                    self.decode_recvd(metric)
    else:
        # One metric per datagram.
        while 1:
            data, addr = sock.recvfrom(buf)
            if not data:
                break
            else:
                self.decode_recvd(data)
def init_server():
    """Create a loopback listener with the module-wide SOCKET_TIMEOUT
    applied.

    Returns a (socket, port) tuple.
    """
    listener = socket.socket()
    listener.settimeout(SOCKET_TIMEOUT)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('localhost', 0))
    listener.listen(5)
    return listener, listener.getsockname()[1]
def test_connect_ex_timeout(self):
    """connect_ex() to a blackhole address (TEST-NET, RFC 5737) reports
    EAGAIN on timeout unless the network is flat-out unreachable."""
    raw = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    raw.settimeout(0.1)
    green = greenio.GreenSocket(raw)
    err = green.connect_ex(('192.0.2.1', 80))
    if err not in (errno.EHOSTUNREACH, errno.ENETUNREACH):
        self.assertEqual(err, errno.EAGAIN)
def get_config():
    """Assemble server configuration from SOVEREIGN_* environment variables.

    Returns a dict with the app name, thread count, host/port, and a
    listening socket — either adopted from an inherited fd
    (SOVEREIGN_SOCKET) or freshly bound to SOVEREIGN_HOST:SOVEREIGN_PORT.

    Raises RuntimeError when SOVEREIGN_APP is missing.
    """
    option = lambda k, d: os.environ.get(k, d)
    # BUG FIX: int(option('SOVEREIGN_SOCKET', None)) raised TypeError
    # whenever the variable was unset; only convert when present.
    inherited_fd = option('SOVEREIGN_SOCKET', None)
    config = {
        'app': option('SOVEREIGN_APP', None),
        'threads': int(option('SOVEREIGN_THREADS', 0)),
        'host': option('SOVEREIGN_HOST', '*'),
        'port': int(option('SOVEREIGN_PORT', 0)),
        'socket': int(inherited_fd) if inherited_fd is not None else None,
        'virtual_env': option('SOVEREIGN_VIRTUAL_ENV', None),
    }
    if config['socket']:
        # Adopt a socket fd handed down by a supervisor process.
        config['socket'] = socket.fromfd(
            config['socket'], socket.AF_INET, socket.SOCK_STREAM
        )
    else:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((config['host'], config['port']))
        sock.listen(500)
        config['socket'] = sock
    if not config['app']:
        raise RuntimeError("App not specified in the environ.")
    if config['virtual_env']:
        # NOTE(review): execfile is Python 2 only — confirm target runtime.
        activate_this = os.path.join(
            config['virtual_env'], 'bin', 'activate_this.py'
        )
        execfile(activate_this, dict(__file__=activate_this))
    return config
def __init__(self, io_loop, port=843, policy_file='flashpolicy.xml'):
    """Constructor.

    `io_loop`      IOLoop instance
    `port`         Port to listen on (defaulted to 843)
    `policy_file`  Policy file location
    """
    self.policy_file = policy_file
    self.port = port
    self.io_loop = io_loop
    # Non-blocking listener; the IOLoop drives accepts via connection_ready.
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.setblocking(0)
    listener.bind(('', self.port))
    listener.listen(128)
    callback = functools.partial(self.connection_ready, listener)
    self.io_loop.add_handler(listener.fileno(), callback, self.io_loop.READ)
def __init__(self):
    """ initialize a bunch of config crap. """
    logger.info('initializing uplink and protocol..')
    self.protocol = protocol.Protocol()
    # Uplink settings pulled from the parsed configuration (var module).
    self.conf = {
        'server': var.Configuration['server'],
        'port': int(var.Configuration['port']),
        'ssl': var.Configuration['ssl'],
        'password': var.Configuration['password'],
        'host': var.c.get('uplink', 'host'),
        'bind': var.c.get('main', 'bind'),
        'serverid': var.c.get('uplink', 'SID'),
        'mnick': var.Configuration['nick'],
        'mident': var.Configuration['ident'],
        'mgecos': var.Configuration['gecos'],
        'mhost': var.Configuration['host']
    }
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Wrap in TLS only when the ssl module imported and the config asks
    # for it (the config value is the string 'True', not a bool).
    if ssl and self.conf['ssl'] == 'True':
        self.connection = ssl.wrap_socket(self.socket)
        logger.info('<-> connection type: ssl')
    else:
        self.connection = self.socket
        logger.info('<-> conection type: plain')
    # Publish shared singletons for the rest of the application.
    var.core = ((self, event.Events()))
    var.protocol = self.protocol
    var.database = database.Database()
def testGetSockOpt(self):
    """getsockopt() reflects that SO_REUSEADDR starts disabled."""
    # Testing getsockopt()
    # We know a socket should start without reuse==0
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    # failIf was deprecated and removed in Python 3.12; assertFalse is
    # the modern spelling with identical semantics.
    self.assertFalse(reuse != 0, "initial mode is reuse")
def test_connect_ex_success():
    """connect_ex() returns 0 on a successful connection.

    Regression test for https://github.com/eventlet/eventlet/issues/696
    """
    server = eventlet.listen(("127.0.0.1", 0))
    client = socket.socket()
    assert client.connect_ex(server.getsockname()) == 0
def scan(srvtypes=_slp_services, addresses=None, localonly=False):
    """Find targets providing matching requested srvtypes

    This is a generator that will iterate over respondants to the SrvType
    requested.

    :param srvtypes: An iterable list of the service types to find
    :param addresses: An iterable of addresses/ranges.  Default is to scan
                      local network segment using multicast and broadcast.
                      Each address can be a single address, hyphen-delimited
                      range, or an IP/CIDR indication of a network.
    :param localonly: If True, only yield responders that presented a
                      link-local (fe80::) address.
    :return: Iterable set of results
    """
    net = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # increase RCVBUF to max, mitigate chance of
    # failure due to full buffer.
    net.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16777216)
    net4.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16777216)
    # SLP is very poor at scanning large counts and managing it, so we
    # must make the best of it
    # Some platforms/config default to IPV6ONLY, we are doing IPv4
    # too, so force it
    #net.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
    # we are going to do broadcast, so allow that...
    initxid = random.randint(0, 32768)
    xididx = 0
    xidmap = {}
    # First we give fast repsonders of each srvtype individual chances to be
    # processed, mitigating volume of response traffic
    rsps = {}
    for srvtype in srvtypes:
        xididx += 1
        _find_srvtype(net, net4, srvtype, addresses, initxid + xididx)
        xidmap[initxid + xididx] = srvtype
        _grab_rsps((net, net4), rsps, 0.1, xidmap)
        # now do a more slow check to work to get stragglers,
        # but fortunately the above should have taken the brunt of volume, so
        # reduced chance of many responses overwhelming receive buffer.
    _grab_rsps((net, net4), rsps, 1, xidmap)
    # now to analyze and flesh out the responses
    for id in rsps:
        if 'service:ipmi' in rsps[id]['services']:
            # 'Athena' IPMI URLs identify ThinkAgile storage; everything
            # else advertising bare IPMI is skipped.
            if 'service:ipmi://Athena:623' in rsps[id]['urls']:
                rsps[id]['services'] = ['service:thinkagile-storage']
            else:
                continue
        if localonly:
            # Require at least one link-local address among the responses.
            for addr in rsps[id]['addresses']:
                if 'fe80' in addr[0]:
                    break
            else:
                continue
        _add_attributes(rsps[id])
        if 'service:lighttpd' in rsps[id]['services']:
            currinf = rsps[id]
            curratt = currinf.get('attributes', {})
            # Lenovo ThinkServer TSMs self-identify as lighttpd; rewrite
            # their service id and normalize enclosure attributes.
            if curratt.get('System-Manufacturing', [None])[0] == 'Lenovo' and curratt.get(
                    'type', [None])[0] == 'LenovoThinkServer':
                currinf['services'] = ['service:lenovo-tsm']
                curratt['enclosure-serial-number'] = curratt['Product-Serial']
                curratt['enclosure-machinetype-model'] = curratt[
                    'Machine-Type']
            else:
                continue
        # Strip protocol bookkeeping before handing the record to callers.
        del rsps[id]['payload']
        del rsps[id]['function']
        del rsps[id]['xid']
        yield rsps[id]
def __init__(self, host, port=42217):
    """Open a TCP connection to *host*:*port* and start with an empty
    receive buffer."""
    conn = socket.socket()
    conn.connect((host, port))
    self.sock = conn
    self.buf = ''
def snoop(handler):
    """Watch for SLP activity

    handler will be called with a dictionary of relevant attributes

    :param handler: callable invoked once per newly-seen peer MAC
    :return:
    """
    active_scan(handler)
    net = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    slpg = socket.inet_pton(socket.AF_INET6, 'ff01::123')
    slpg2 = socket.inet_pton(socket.AF_INET6, 'ff02::123')
    for i6idx in util.list_interface_indexes():
        # Join the SLP multicast groups on every interface.
        mreq = slpg + struct.pack('=I', i6idx)
        net.setsockopt(IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
        mreq = slpg2 + struct.pack('=I', i6idx)
        net.setsockopt(IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    net.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    for i4 in util.list_ips():
        if 'broadcast' not in i4:
            continue
        slpmcast = socket.inet_aton('239.255.255.253') + \
            socket.inet_aton(i4['addr'])
        try:
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                            slpmcast)
        except socket.error as e:
            if e.errno != 98:
                raise
            # socket in use can occur when aliased ipv4 are encountered
    net.bind(('', 427))
    net4.bind(('', 427))

    while True:
        newmacs = set([])
        r, _, _ = select.select((net, net4), (), (), 60)
        # clear known_peers and peerbymacaddress
        # to avoid stale info getting in...
        # rely upon the select(0.2) to catch rapid fire and aggregate ip
        # addresses that come close together
        # calling code needs to understand deeper context, as snoop
        # will now yield dupe info over time
        known_peers = set([])
        peerbymacaddress = {}
        neighutil.update_neigh()
        while r:
            for s in r:
                (rsp, peer) = s.recvfrom(9000)
                ip = peer[0].partition('%')[0]
                # Only consider peers we can resolve to a MAC address.
                if ip not in neighutil.neightable:
                    continue
                if peer in known_peers:
                    continue
                known_peers.add(peer)
                mac = neighutil.neightable[ip]
                if mac in peerbymacaddress:
                    peerbymacaddress[mac]['addresses'].append(peer)
                else:
                    q = query_srvtypes(peer)
                    if not q or not q[0]:
                        # SLP might have started and not ready yet
                        # ignore for now
                        known_peers.discard(peer)
                        continue
                    # we want to prioritize the very well known services
                    svcs = []
                    for svc in q:
                        if svc in _slp_services:
                            svcs.insert(0, svc)
                        else:
                            svcs.append(svc)
                    peerbymacaddress[mac] = {
                        'services': svcs,
                        'addresses': [peer],
                    }
                newmacs.add(mac)
            # Short follow-up select aggregates rapid-fire responses.
            r, _, _ = select.select((net, net4), (), (), 0.2)
        for mac in newmacs:
            peerbymacaddress[mac]['xid'] = 1
            _add_attributes(peerbymacaddress[mac])
            peerbymacaddress[mac]['hwaddr'] = mac
            handler(peerbymacaddress[mac])
def _enableStaticProvisioning(self): # The Aastra firmware is stateful, in an annoying way. Just submitting # a POST to the autoprovisioning URL from the factory-default setting will # only get a 200 OK with a message to visit sysinfo.html, and the settings # will NOT be applied. # To actually apply the settings, it is required to perform a dummy GET # to /sysinfo.html, discard anything returned, and only then # perform the POST. # Additionally, the TCP/IP and HTTP stack of the Aastra 6739i is buggy. # When performing a POST, the firmware wants the end of the headers and # the start of the body in the same TCP/IP packet. If they are on # different packets, the request hangs. Due to the way urllib2 works, # it introduces a flush between the two, which triggers said hang. # Therefore, the full POST request must be assembled and sent manually # as a single write. if not self._doAuthGet('/sysinfo.html'): return False # Set the Elastix server as the provisioning server postvars = { 'protocol' : 'TFTP', 'tftp' : self._serverip, 'tftppath' : '', 'alttftp' : self._serverip, 'alttftppath' : '', 'usealttftp' : '1', 'ftpserv' : '', 'ftppath' : '', 'ftpuser' : '', 'ftppass' : '', 'httpserv' : '', 'httppath' : '', 'httpport' : 80, 'httpsserv' : '', 'httpspath' : '', 'httpsport' : 80, 'autoResyncMode': 0, 'autoResyncTime': '00:00', 'maxDelay' : 15, 'days' : 0, 'postList' : self._serverip, } postbody = urllib.urlencode(postvars) urlpath = '/configurationServer.html' postrequest = (\ 'POST %s HTTP/1.1\r\n' +\ 'Host: %s\r\n' +\ 'Connection: close\r\n' +\ 'Accept-Encoding: identity\r\n' +\ 'Authorization: Basic %s\r\n' +\ 'Content-length: %d\r\n' +\ 'Content-type: application/x-www-form-urlencoded\r\n' +\ '\r\n%s') % (urlpath, self._ip, base64.encodestring('%s:%s' % (self._http_username, self._http_password)).strip(), len(postbody), postbody) try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self._ip, 80)) sock.sendall(postrequest) # Rather than parse the 
response myself, I create an instance of # HTTPResponse. However, begin() is an internal method, and not # guaranteed to exist in future versions of the library. resp = httplib.HTTPResponse(sock, strict=1, method='POST') resp.begin() htmlbody = resp.read() if resp.status <> 200: logging.error('Endpoint %s@%s failed to post configuration - %s' % (self._vendorname, self._ip, r)) return False if not 'Provisioning complete' in htmlbody: logging.error('Endpoint %s@%s failed to set configuration server - not provisioned' % (self._vendorname, self._ip)) return False except socket.error, e: logging.error('Endpoint %s@%s failed to connect - %s' % (self._vendorname, self._ip, str(e))) return False
def test_wrap_socket(self):
    """ssl.wrap_socket() accepts a plain listening socket without error."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('127.0.0.1', 0))
    listener.listen(50)
    ssl.wrap_socket(listener)
def min_buf_size():
    """Return the minimum buffer size that the platform supports."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Ask for a 1-byte send buffer; the kernel clamps it to its floor,
    # and reading the option back reveals that floor.
    probe.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
    return probe.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
def send_event(payload):
    """Fire-and-forget *payload* at the local statsd UDP port (8125)."""
    statsd_addr = ('127.0.0.1', 8125)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(payload, statsd_addr)
def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
    """Watch for SSDP notify messages

    The handler shall be called on any service
    coming online.
    byehandler is called whenever a system advertises that it is departing.
    If no byehandler is specified, byebye messages are ignored.
    The handler is given (as possible), the mac address, a list of viable
    sockaddrs to reference the peer, and the notification type (e.g.
    'urn:dmtf-org:service:redfish-rest:1')

    :param handler:  A handler for online notifications from network
    :param byehandler: Optional handler for devices going off the network
    :param protocol: unused here; kept for interface compatibility
    :param uuidlookup: optional callable mapping a uuid string to a node
                       name; enables answering confluent M-SEARCH queries
    """
    # Normally, I like using v6/v4 agnostic socket. However, since we are
    # dabbling in multicast wizardry here, such sockets can cause big problems,
    # so we will have two distinct sockets
    tracelog = log.Logger('trace')
    known_peers = set([])
    net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    # Join the SSDP v6 multicast group on every interface.
    for ifidx in util.list_interface_indexes():
        v6grp = ssdp6mcast + struct.pack('=I', ifidx)
        net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, v6grp)
    net6.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Join the SSDP v4 multicast group once per local address.
    for i4 in util.list_ips():
        ssdp4mcast = socket.inet_pton(socket.AF_INET, mcastv4addr) + \
            socket.inet_aton(i4['addr'])
        try:
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                            ssdp4mcast)
        except socket.error as e:
            if e.errno != 98:
                # errno 98 can happen if aliased, skip for now
                raise
    net4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4.bind(('', 1900))
    net6.bind(('', 1900))
    peerbymacaddress = {}
    while True:
        try:
            newmacs = set([])
            machandlers = {}
            # Block up to 60s for the first datagram, then drain quickly.
            r, _, _ = select.select((net4, net6), (), (), 60)
            neighutil.update_neigh()
            while r:
                for s in r:
                    (rsp, peer) = s.recvfrom(9000)
                    if rsp[:4] == b'PING':
                        continue
                    rsp = rsp.split(b'\r\n')
                    method, _, _ = rsp[0].split(b' ', 2)
                    if method == b'NOTIFY':
                        # Strip any IPv6 scope id before the neighbor lookup.
                        ip = peer[0].partition('%')[0]
                        if ip not in neighutil.neightable:
                            continue
                        if peer in known_peers:
                            continue
                        mac = neighutil.neightable[ip]
                        known_peers.add(peer)
                        newmacs.add(mac)
                        if mac in peerbymacaddress:
                            peerbymacaddress[mac]['addresses'].append(peer)
                        else:
                            peerbymacaddress[mac] = {
                                'hwaddr': mac,
                                'addresses': [peer],
                            }
                        peerdata = peerbymacaddress[mac]
                        for headline in rsp[1:]:
                            if not headline:
                                continue
                            headline = util.stringify(headline)
                            header, _, value = headline.partition(':')
                            header = header.strip()
                            value = value.strip()
                            if header == 'NT':
                                peerdata['service'] = value
                            elif header == 'NTS':
                                if value == 'ssdp:byebye':
                                    machandlers[mac] = byehandler
                                elif value == 'ssdp:alive':
                                    machandlers[mac] = None  # handler
                    elif method == b'M-SEARCH':
                        if not uuidlookup:
                            continue
                        #ip = peer[0].partition('%')[0]
                        for headline in rsp[1:]:
                            if not headline:
                                continue
                            headline = util.stringify(headline)
                            headline = headline.partition(':')
                            if len(headline) < 3:
                                continue
                            if headline[0] == 'ST' and headline[-1].startswith(' urn:xcat.org:service:confluent:'):
                                try:
                                    cfm.check_quorum()
                                except Exception:
                                    # Not in quorum; stay quiet rather than
                                    # answer with possibly stale data.
                                    continue
                                for query in headline[-1].split('/'):
                                    if query.startswith('uuid='):
                                        curruuid = query.split('=', 1)[1].lower()
                                        node = uuidlookup(curruuid)
                                        if not node:
                                            break
                                        # Do not bother replying to a node that
                                        # we have no deployment activity
                                        # planned for
                                        cfg = cfm.ConfigManager(None)
                                        cfd = cfg.get_node_attributes(
                                            node, 'deployment.pendingprofile')
                                        if not cfd.get(node, {}).get(
                                                'deployment.pendingprofile', {}).get('value', None):
                                            break
                                        currtime = time.time()
                                        seconds = int(currtime)
                                        msecs = int(currtime * 1000 % 1000)
                                        reply = 'HTTP/1.1 200 OK\r\nNODENAME: {0}\r\nCURRTIME: {1}\r\nCURRMSECS: {2}\r\n'.format(node, seconds, msecs)
                                        if '%' in peer[0]:
                                            # Link-local peer: advertise which
                                            # interface the node should manage
                                            # through.
                                            iface = peer[0].split('%', 1)[1]
                                            reply += 'MGTIFACE: {0}\r\n'.format(
                                                peer[0].split('%', 1)[1])
                                            ncfg = netutil.get_nic_config(
                                                cfg, node, ifidx=iface)
                                            if ncfg.get('matchesnodename', None):
                                                reply += 'DEFAULTNET: 1\r\n'
                                        if not isinstance(reply, bytes):
                                            reply = reply.encode('utf8')
                                        s.sendto(reply, peer)
                # Short follow-up poll to batch bursts of notifications.
                r, _, _ = select.select((net4, net6), (), (), 0.2)
            for mac in newmacs:
                thehandler = machandlers.get(mac, None)
                if thehandler:
                    thehandler(peerbymacaddress[mac])
        except Exception:
            # Never let one bad datagram kill the snoop loop; log and go on.
            tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
                         event=log.Events.stacktrace)
def connect_tcp(hostport):
    """Return a new TCP socket connected to *hostport* (a (host, port) pair)."""
    conn = socket.socket()
    conn.connect(hostport)
    return conn
def setUp(self):
    # Bind the server-side UDP socket and record the port the test
    # framework actually handed out in the module-global PORT.
    global PORT
    self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    PORT = test_support.bind_port(self.serv, HOST, PORT)
def _open_socket(self): return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def snoop(handler, byehandler=None):
    """Watch for SSDP notify messages

    The handler shall be called on any service
    coming online.
    byehandler is called whenever a system advertises that it is departing.
    If no byehandler is specified, byebye messages are ignored.
    The handler is given (as possible), the mac address, a list of viable
    sockaddrs to reference the peer, and the notification type (e.g.
    'urn:dmtf-org:service:redfish-rest:1')

    :param handler:  A handler for online notifications from network
    :param byehandler: Optional handler for devices going off the network
    """
    # NOTE(review): this variant splits datagrams with str patterns
    # ('\r\n', ' '), so it assumes Python 2 byte-strings from recvfrom —
    # under Python 3 these split calls would raise TypeError. Confirm the
    # target interpreter before reuse.
    # Normally, I like using v6/v4 agnostic socket. However, since we are
    # dabbling in multicast wizardry here, such sockets can cause big problems,
    # so we will have two distinct sockets
    known_peers = set([])
    net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    # Join the SSDP v6 multicast group on every interface.
    for ifidx in util.list_interface_indexes():
        v6grp = ssdp6mcast + struct.pack('=I', ifidx)
        net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, v6grp)
    net6.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Join the SSDP v4 multicast group once per local address.
    for i4 in util.list_ips():
        ssdp4mcast = socket.inet_pton(socket.AF_INET, mcastv4addr) + \
            socket.inet_aton(i4['addr'])
        net4.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                        ssdp4mcast)
    net4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4.bind(('', 1900))
    net6.bind(('', 1900))
    peerbymacaddress = {}
    while True:
        newmacs = set([])
        machandlers = {}
        # Block up to 60s for the first datagram, then drain quickly.
        r, _, _ = select.select((net4, net6), (), (), 60)
        neighutil.update_neigh()
        while r:
            for s in r:
                (rsp, peer) = s.recvfrom(9000)
                rsp = rsp.split('\r\n')
                method, _, _ = rsp[0].split(' ', 2)
                if method == 'NOTIFY':
                    # Strip any IPv6 scope id before the neighbor lookup.
                    ip = peer[0].partition('%')[0]
                    if ip not in neighutil.neightable:
                        continue
                    if peer in known_peers:
                        continue
                    mac = neighutil.neightable[ip]
                    known_peers.add(peer)
                    newmacs.add(mac)
                    if mac in peerbymacaddress:
                        peerbymacaddress[mac]['peers'].append(peer)
                    else:
                        peerbymacaddress[mac] = {
                            'hwaddr': mac,
                            'peers': [peer],
                        }
                    peerdata = peerbymacaddress[mac]
                    for headline in rsp[1:]:
                        if not headline:
                            continue
                        header, _, value = headline.partition(':')
                        header = header.strip()
                        value = value.strip()
                        if header == 'NT':
                            peerdata['service'] = value
                        elif header == 'NTS':
                            if value == 'ssdp:byebye':
                                machandlers[mac] = byehandler
                            elif value == 'ssdp:alive':
                                machandlers[mac] = handler
            # Short follow-up poll to batch bursts of notifications.
            r, _, _ = select.select((net4, net6), (), (), 0.1)
        for mac in newmacs:
            thehandler = machandlers.get(mac, None)
            if thehandler:
                thehandler(peerbymacaddress[mac])
def clientSetUp(self):
    # The client end of the test talks over IPv4 UDP.
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.cli = udp_sock
def connect(self):
    """Open the TCP connection, preferring a backend-mapped address for
    the configured host when one is registered."""
    target = backend.get_backend(self.host) or self.host
    self.sock = socket.socket()
    self.sock.connect((target, self.port))
def run(self, bindaddr):
    """Serve forever: receive UDP datagrams on *bindaddr* and hand each
    one to ``call_dispatch`` in its own green thread."""
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server_sock.bind(bindaddr)
    while True:
        datagram, sender = server_sock.recvfrom(65500)
        eventlet.spawn_n(self.call_dispatch, datagram, sender, server_sock)
def _init_sgip_connection(self):
    """Resolve the SGIP host and establish the client TCP connection."""
    resolved_ip = socket.gethostbyname(self._host)
    self.__csock = socket.socket()
    self.__csock.connect((resolved_ip, self._port))
    logger.info('%s connected' % self._host)
def __init__(self, appid='', token=''):
    """Store credentials, open a fresh TCP socket, and start with an
    empty GUID cache."""
    super(SfkModel, self).__init__()
    self.appid = appid
    self.token = token
    self.guids = set()
    self.sock = socket.socket()
def test_server_starvation(self, sendloops=15):
    """Stress test: five forked client processes blast data at five
    green-thread servers; every server must make progress concurrently
    (no server may be starved while another runs to completion)."""
    recvsize = 2 * min_buf_size()
    sendsize = 10000 * recvsize
    # One slot per server greenlet; each collects
    # [total_bytes_received, (first_recv_time, peer_close_time)].
    results = [[] for i in range(5)]
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('127.0.0.1', 0))
    port = listener.getsockname()[1]
    listener.listen(50)
    base_time = time.time()

    def server(my_results):
        sock, addr = listener.accept()
        datasize = 0
        t1 = None  # time of first recv, relative to base_time
        t2 = None  # time the peer closed, relative to base_time
        try:
            while True:
                data = sock.recv(recvsize)
                if not t1:
                    t1 = time.time() - base_time
                if not data:
                    t2 = time.time() - base_time
                    my_results.append(datasize)
                    my_results.append((t1, t2))
                    break
                datasize += len(data)
        finally:
            sock.close()

    def client():
        # Run each client in a separate OS process so the senders cannot
        # be scheduled (and throttled) by the hub under test.
        pid = os.fork()
        if pid:
            return pid
        client = _orig_sock.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', port))
        bufsized(client, size=sendsize)
        for i in range(sendloops):
            client.sendall(b'*' * sendsize)
        client.close()
        os._exit(0)

    clients = []
    servers = []
    for r in results:
        servers.append(eventlet.spawn(server, r))
    for r in results:
        clients.append(client())
    for s in servers:
        s.wait()
    for c in clients:
        os.waitpid(c, 0)

    listener.close()

    # now test that all of the server receive intervals overlap, and
    # that there were no errors.
    for r in results:
        assert len(r) == 2, "length is %d not 2!: %s\n%s" % (len(r), r, results)
        assert r[0] == sendsize * sendloops
        assert len(r[1]) == 2
        assert r[1][0] is not None
        assert r[1][1] is not None

    starttimes = sorted(r[1][0] for r in results)
    endtimes = sorted(r[1][1] for r in results)
    runlengths = sorted(r[1][1] - r[1][0] for r in results)

    # assert that the last task started before the first task ended
    # (our no-starvation condition)
    assert starttimes[-1] < endtimes[0], \
        "Not overlapping: starts %s ends %s" % (starttimes, endtimes)

    maxstartdiff = starttimes[-1] - starttimes[0]

    assert maxstartdiff * 2 < runlengths[0], \
        "Largest difference in starting times more than twice the shortest running time!"
    assert runlengths[0] * 2 > runlengths[-1], \
        "Longest runtime more than twice as long as shortest!"
# Example: a minimal green (non-blocking) SSL server that accepts a single
# client, echoes up to 100 bytes of its input to stdout, and shuts down.
# NOTE(review): requires 'server.key' and 'server.crt' to exist in the
# working directory — confirm before running.
from eventlet.green import socket
from eventlet.green.OpenSSL import SSL

# insecure context, only for example purposes
context = SSL.Context(SSL.SSLv23_METHOD)
context.use_privatekey_file('server.key')
context.use_certificate_file('server.crt')

# create underlying green socket and wrap in SSL
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection = SSL.Connection(context, sock)

# configure as server
connection.set_accept_state()
connection.bind(('127.0.0.1', 8443))
connection.listen(50)

# accept one client connection then close up shop
client_conn, addr = connection.accept()
print(client_conn.read(100))
client_conn.shutdown()
client_conn.close()
connection.close()
def testNameOverflow(self):
    # An abstract-namespace name one byte longer than UNIX_PATH_MAX
    # (the leading NUL counts) must be rejected by bind().
    too_long = "\x00" + "h" * self.UNIX_PATH_MAX
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    self.assertRaises(socket.error, sock.bind, too_long)
def download_metadata(self, address, infohash, metadata_queue, timeout=5):
    """Fetch torrent metadata (BEP 9 ut_metadata extension) from one peer.

    Connects to *address*, performs the BitTorrent and extension
    handshakes, downloads the metadata piece by piece, and on success
    puts ``(infohash, address, decoded_metadata, elapsed_seconds)`` onto
    *metadata_queue* and records the infohash in ``self.dowloaded``.

    :param address: (host, port) of the remote peer
    :param infohash: torrent infohash being resolved
    :param metadata_queue: queue receiving successful results
    :param timeout: per-socket timeout in seconds
    """
    # NOTE(review): Python 2 code — str-based packet slicing and the
    # ``errno, err_msg = error`` exception unpacking will not run on
    # Python 3. Also, if socket.socket() itself raised, ``the_socket``
    # would be unbound in the finally clause — TODO confirm/fix upstream.
    metadata = []
    start_time = time()
    if infohash in self.dowloaded:
        return
    try:
        the_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # the_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # the_socket.bind(('0.0.0.0', 9000))
        the_socket.settimeout(timeout)
        the_socket.connect(address)

        # handshake
        send_handshake(the_socket, infohash)
        packet = the_socket.recv(4096)

        # handshake error
        if not check_handshake(packet, infohash):
            return

        # ext handshake
        send_ext_handshake(the_socket)
        packet = the_socket.recv(4096)

        # get ut_metadata and metadata_size
        ut_metadata, metadata_size = get_ut_metadata(
            packet), get_metadata_size(packet)

        # request each piece of metadata (pieces are 16 KiB each)
        for piece in range(int(math.ceil(metadata_size / (16.0 * 1024)))):
            if infohash in self.dowloaded:
                break
            request_metadata(the_socket, ut_metadata, piece)
            packet = recvall(the_socket, timeout)  # the_socket.recv(1024*17)
            # Strip the bencoded message envelope preceding the raw piece.
            metadata.append(packet[packet.index("ee") + 2:])
            if '6:pieces' in packet:
                break
    except socket.timeout:
        logger.debug('Connect timeout to %s:%d' % address)
        # TODO: Maybe need NAT Traversa
    except socket.error as error:
        errno, err_msg = error
        if errno == 10052:
            # Winsock WSAENETRESET
            logger.debug(
                'Network dropped connection on reset(10052) %s:%d' % address)
        elif errno == 10061:
            # Winsock WSAECONNREFUSED
            logger.debug('Connection refused(10061) %s:%d' % address)
        else:
            logger.error(err_msg)
    except Exception:
        pass
    finally:
        the_socket.close()

    # Reassemble and trim the collected pieces; anything past the
    # '6:pieces' key (the piece hashes) is not needed here.
    metadata = "".join(metadata)
    if metadata.startswith('d') and '6:pieces' in metadata:
        metadata = metadata[:metadata.index('6:pieces')] + 'e'
        try:
            d_metadata = bdecode(metadata)
        except Exception as e:
            logger.error(str(e) + 'metadata: ' + metadata)
        else:
            self.dowloaded.add(infohash)
            metadata_queue.put(
                (infohash, address, d_metadata, time() - start_time))
def testMaxName(self):
    # The longest legal abstract-namespace name (leading NUL included in
    # UNIX_PATH_MAX) must bind successfully and round-trip via
    # getsockname().
    name = "\x00" + "h" * (self.UNIX_PATH_MAX - 1)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.bind(name)
    self.assertEqual(sock.getsockname(), name)
def testSendAfterClose(self):
    # testing send() after close() with timeout
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.settimeout(1)
    conn.close()
    self.assertRaises(socket.error, conn.send, "spam")
def testSetSockOpt(self):
    # Testing setsockopt(): after enabling SO_REUSEADDR, getsockopt must
    # report it as set (non-zero).
    tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.failIf(
        tcp_sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0,
        "failed to set reuse mode")
def test_sockopt_interface(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0 assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) == b'\000' sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def test_socket_api_family():
    # It was named family_or_realsock
    # https://github.com/eventlet/eventlet/issues/319
    # Passing family as a keyword argument must be accepted.
    sock = socket.socket(family=socket.AF_INET)
def test_shutdown_safe(self):
    # shutdown_safe() must swallow the error a plain shutdown() would
    # raise on an already-closed socket.
    dead_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    dead_sock.close()
    # should not raise
    greenio.shutdown_safe(dead_sock)
def _find_service(service, target):
    """Generator: discover *service* via SSDP and yield per-peer data dicts.

    When *target* is given, the M-SEARCH is unicast to each of its
    resolved addresses; otherwise it is multicast/broadcast on every
    local interface. Responses are collected for ~4 seconds (SSDP spreads
    replies over an interval), parsed via _parse_ssdp, then yielded.

    :param service: SSDP service type to insert into the search message
    :param target: optional host to query directly instead of the network
    """
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    if target:
        addrs = socket.getaddrinfo(target, 1900, 0, socket.SOCK_DGRAM)
        for addr in addrs:
            host = addr[4][0]
            if addr[0] == socket.AF_INET:
                msg = smsg.format(host, service)
                if not isinstance(msg, bytes):
                    msg = msg.encode('utf8')
                net4.sendto(msg, addr[4])
            elif addr[0] == socket.AF_INET6:
                # v6 literals need brackets in the Host-style header.
                host = '[{0}]'.format(host)
                msg = smsg.format(host, service)
                if not isinstance(msg, bytes):
                    msg = msg.encode('utf8')
                net6.sendto(msg, addr[4])
    else:
        net4.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # Multicast the v6 search once per interface index.
        for idx in util.list_interface_indexes():
            net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF,
                            idx)
            try:
                msg = smsg.format('[{0}]'.format(mcastv6addr), service)
                if not isinstance(msg, bytes):
                    msg = msg.encode('utf8')
                net6.sendto(msg, (mcastv6addr, 1900, 0, 0))
            except socket.error:
                # ignore interfaces without ipv6 multicast causing error
                pass
        # Multicast and broadcast the v4 search once per local address.
        for i4 in util.list_ips():
            if 'broadcast' not in i4:
                continue
            addr = i4['addr']
            bcast = i4['broadcast']
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
                            socket.inet_aton(addr))
            msg = smsg.format(mcastv4addr, service)
            if not isinstance(msg, bytes):
                msg = msg.encode('utf8')
            net4.sendto(msg, (mcastv4addr, 1900))
            msg = smsg.format(bcast, service)
            if not isinstance(msg, bytes):
                msg = msg.encode('utf8')
            net4.sendto(msg, (bcast, 1900))
    # SSDP by spec encourages responses to spread out over a 3 second interval
    # hence we must be a bit more patient
    deadline = util.monotonic_time() + 4
    r, _, _ = select.select((net4, net6), (), (), 4)
    peerdata = {}
    while r:
        for s in r:
            (rsp, peer) = s.recvfrom(9000)
            neighutil.refresh_neigh()
            _parse_ssdp(peer, rsp, peerdata)
        timeout = deadline - util.monotonic_time()
        if timeout < 0:
            timeout = 0
        r, _, _ = select.select((net4, net6), (), (), timeout)
    for nid in peerdata:
        # Special-case Athena storage devices: identify them by their
        # UPnP description document and tag their service list.
        for url in peerdata[nid].get('urls', ()):
            if url.endswith('/desc.tmpl'):
                info = urlopen(url).read()
                if '<friendlyName>Athena</friendlyName>' in info:
                    peerdata[nid]['services'] = ['service:thinkagile-storage']
        yield peerdata[nid]