def __init__(self, listener, cli_timeout=60):
    """Initialize the server on *listener*.

    listener: address tuple (or listening socket) handed to StreamServer.
    cli_timeout: idle timeout in seconds for CLI connections.
    """
    StreamServer.__init__(self, listener)
    self.listener = listener
    self.cli_timeout = cli_timeout
    # Per-connection PHP console state -- populated elsewhere in the class.
    self.php_cons = dict()
    # Message queue shared with the CLI connection manager below.
    self.msg_queue = Queue()
    self.cli_conns = cppsconn.CppsConn(self.msg_queue)
def test_connection(self):
    """Tests if the VNC capability is up, and tries login.

    Drives one RFB 3.7 handshake against the capability and checks each
    step of the exchange; authentication is expected to fail.
    """
    options = {'enabled': 'True', 'port': 0, 'users': {'test': 'test'}}
    cap = vnc.Vnc(options, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    try:
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client_socket.connect(('127.0.0.1', srv.server_port))
        protocol_version = client_socket.recv(1024)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(protocol_version, 'RFB 003.007\n')
        client_socket.send(RFB_VERSION)
        supported_auth_methods = client_socket.recv(1024)
        self.assertEqual(supported_auth_methods, SUPPORTED_AUTH_METHODS)
        client_socket.send(VNC_AUTH)
        # Consume the challenge so the next recv sees the auth result.
        challenge = client_socket.recv(1024)
        # Send 16 bytes because server expects them. Don't care what they are.
        client_socket.send('\x00' * 16)
        auth_status = client_socket.recv(1024)
        self.assertEqual(auth_status, AUTH_FAILED)
    finally:
        # Bug fix: the server was previously never stopped, leaking the
        # listener between tests (sibling tests call srv.stop()).
        srv.stop()
def test_command_list(self):
    """Telnet bait's 'ls' should list the honeypot's fake filesystem."""
    options = {'enabled': 'True', 'port': 0,
               'protocol_specific_data': {'max_attempts': 3},
               'users': {'test': 'test'}}
    cap = honeypot_telnet.Telnet(options, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    try:
        bait_info = {
            'timing': 'regular',
            'username': '******',
            'password': '******',
            'port': srv.server_port,
            'server': '127.0.0.1',
            'honeypot_id': '1234'
        }
        BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
        current_bee = bee_telnet.Telnet(bait_info)
        current_bee.connect()
        current_bee.login(bait_info['username'], bait_info['password'])
        resp = current_bee.ls()
        self.assertTrue('var' in resp)
    finally:
        # Bug fix: srv was never stopped (sibling tests call srv.stop()).
        srv.stop()
def test_login(self):
    """FTP: Testing different login combinations"""
    options = {'enabled': 'True', 'port': 0, 'banner': 'Test Banner',
               'protocol_specific_data': {'max_attempts': 3,
                                          'banner': 'test banner',
                                          'syst_type': 'Test Type'},
               'users': {'test': 'test'}}
    cap = honeypot_ftp.ftp(options, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    try:
        bee_info = {
            'enabled': True,
            'timing': 'regular',
            'username': '******',
            'password': '******',
            'port': srv.server_port,
            'server': '127.0.0.1'
        }
        # Removed unused local `beesessions` (never passed anywhere).
        BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
        current_bait = bee_ftp.Ftp(bee_info)
        current_bait.connect()
        current_bait.login(bee_info['username'], bee_info['password'])
    finally:
        srv.stop()
def test_command_cd(self):
    """Telnet bait's 'cd' should change its tracked working directory."""
    options = {'enabled': 'True', 'port': 0,
               'protocol_specific_data': {'max_attempts': 3},
               'users': {'test': 'test'}}
    cap = honeypot_telnet.Telnet(options, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    try:
        bait_info = {
            'timing': 'regular',
            'username': '******',
            'password': '******',
            'port': srv.server_port,
            'server': '127.0.0.1'
        }
        BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
        current_bait = bee_telnet.Telnet(bait_info)
        current_bait.connect()
        current_bait.login(bait_info['username'], bait_info['password'])
        # Command: cd (assertEquals is a deprecated alias of assertEqual)
        self.assertEqual('/', current_bait.state['working_dir'])
        current_bait.cd('/var')
        self.assertEqual('/var', current_bait.state['working_dir'])
    finally:
        # Bug fix: srv was never stopped (sibling tests call srv.stop()).
        srv.stop()
def __init__(self, listener, profiler=None, interval=INTERVAL, log=LOG,
             pickle_protocol=PICKLE_PROTOCOL, **server_kwargs):
    """Combine a gevent StreamServer with profiling support.

    Extra keyword arguments are forwarded to StreamServer; the remaining
    parameters configure the ProfilingServer base.
    """
    # Both bases are initialized explicitly (no cooperative super() here).
    StreamServer.__init__(self, listener, **server_kwargs)
    ProfilingServer.__init__(self, profiler, interval, log, pickle_protocol)
    # Guards profiling state; the greenlet is started lazily elsewhere.
    self.lock = Semaphore()
    self.profiling_greenlet = None
def close(self):
    """Close the connection, aborting hard on a second close request."""
    if self.closed:
        sys.exit('Multiple exit signals received - aborting.')
    else:
        note('Closing socket')
        send_bye(self.socket)
        # Bug fix: `StreamServer.close()` called the unbound method without
        # `self`, which raises TypeError; pass the instance explicitly.
        StreamServer.close(self)
def test_login(self):
    """Tests if the SMTP bee can send emails to the SMTP capability"""
    sessions = {}
    users = {'test': BaitUser('test', 'test')}
    authenticator = Authenticator(users)
    Session.authenticator = authenticator
    cap = hive_smtp.smtp(sessions,
                         {'enabled': 'True', 'port': 0, 'banner': 'Test'},
                         users, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    try:
        bee_info = {
            'timing': 'regular',
            'username': '******',
            'password': '******',
            'port': srv.server_port,
            'server': '127.0.0.1',
            'local_hostname': 'testhost'
        }
        beesessions = {}
        BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
        current_bee = bee_smtp.smtp(beesessions, bee_info)
        current_bee.connect()
        current_bee.login(bee_info['username'], bee_info['password'])
        result = current_bee.client.sendmail('*****@*****.**',
                                             '*****@*****.**',
                                             'Just testing the SMTP bee')
        # An empty dict means every recipient was accepted.
        # (assertEquals is a deprecated alias of assertEqual.)
        self.assertEqual(result, {})
    finally:
        srv.stop()
def stop(self, *args, **kwargs):
    """Stop the agent server, its connected agent clients, and the component.

    *args/**kwargs are accepted for signature compatibility and ignored.
    """
    self.logger.info("Agent server {} stopping".format(self.listener))
    # NOTE(review): this calls StreamServer.close rather than
    # StreamServer.stop -- presumably intentional (just stop accepting),
    # but confirm against the StreamServer lifecycle used here.
    StreamServer.close(self)
    # Tear down every per-agent client server before stopping the component.
    for serv in self.server.agent_clients.values():
        serv.stop()
    Component.stop(self)
    self.logger.info("Exit")
class ZookeeperCoordinator(object):
    """Network coordinator that discovers nodes using zookeeper."""

    def __init__(self, mesh, localnode, coordinator, address=None, port=45429):
        """
        mesh: mesh object given to each accepted connection handler.
        localnode: local node descriptor; its .id names the published znode.
        coordinator: ZooKeeper connection spec for ZookeeperFramework.
        address/port: endpoint advertised to the other mesh members.
        """
        # All our znodes live under the /pyact chroot.
        self.framework = ZookeeperFramework(coordinator, chroot='/pyact')
        self.localnode = localnode
        self.mesh = mesh
        self.port = port
        self.address = address

    def _publish(self):
        """Publish the local node."""
        # Ephemeral znode: it disappears automatically if this process dies.
        self.framework.create().parents_if_needed().as_ephemeral().with_data(
            '%s:%d' % (self.address, self.port)).for_path(
            os.path.join('nodes', self.localnode.id))

    def accept(self, socket, address):
        """Accept an incoming connection.

        Note that this is called in an isolated greenlet, we can
        therefor block.
        """
        return handle_connection(socket, self.mesh)

    def start(self):
        """Start the coordinator.

        This will connect to the ZooKeeper cluster and register our
        local node.  It will also establish connections to other
        parties of the mesh.
        """
        self.server = StreamServer(('0.0.0.0', self.port), self.accept)
        self.server.start()
        self.framework.connect()
def test_retrieve(self):
    """Tests if a mail can be properly retrieved from the mail corpus."""
    sessions = {}
    users = {'test': BaitUser('test', 'test')}
    authenticator = Authenticator(users)
    Session.authenticator = authenticator
    cap = hive_smtp.smtp(sessions,
                         {'enabled': 'True', 'port': 0, 'banner': 'Test'},
                         users, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    # Yield so the server greenlet gets scheduled before we proceed.
    gevent.sleep()
    try:
        bee_info = {
            'timing': 'regular',
            'username': '******',
            'password': '******',
            'port': srv.server_port,
            'server': '127.0.0.1',
            'local_hostname': 'testhost'
        }
        beesessions = {}
        BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
        current_bee = bee_smtp.smtp(beesessions, bee_info)
        from_addr, to_addr, mail_body = current_bee.get_one_mail()
        self.assertGreater(len(from_addr), 0)
        self.assertGreater(len(to_addr), 0)
        self.assertGreater(len(mail_body), 0)
    finally:
        # Bug fix: srv was never stopped (sibling tests call srv.stop()).
        srv.stop()
def run(self):
    """Worker main loop: serve until the master dies or we are stopped,
    then shut the server down within self.timeout."""
    self.socket.setblocking(1)
    pool = Pool(self.worker_connections)
    # Prefer the configured server class (e.g. a WSGI server); fall back
    # to a raw StreamServer with this worker's handle().
    if self.server_class is not None:
        server = self.server_class(
            self.socket, application=self.wsgi, spawn=pool, log=self.log,
            handler_class=self.wsgi_handler)
    else:
        server = StreamServer(self.socket, handle=self.handle, spawn=pool)

    server.start()
    try:
        while self.alive:
            self.notify()
            # A changed parent pid means the master died and we were
            # re-parented: shut down.
            if self.ppid != os.getppid():
                self.log.info("Parent changed, shutting down: %s", self)
                break
            gevent.sleep(1.0)
    except KeyboardInterrupt:
        pass

    try:
        # Try to stop connections until timeout
        self.notify()
        server.stop(timeout=self.timeout)
    except:
        # Deliberate best-effort shutdown: never let errors escape here.
        pass
def main():
    """Load configuration, build the selected application, and serve it."""
    config = utils.getcfg([
        'serve.conf',
        '~/.webserver/serve.conf',
        '/etc/webserver/serve.conf'])
    utils.initlog(config.get('log', 'loglevel'), config.get('log', 'logfile'))
    addr = (config.get('main', 'addr'), config.getint('main', 'port'))

    # Pick the request-handling engine.
    engine = config.get('server', 'engine')
    if engine == 'apps':
        import apps
        app = http.WebServer(apps.dis, config.get('log', 'access'))
    elif engine == 'wsgi':
        import app_webpy
        app = http.WSGIServer(app_webpy.app.wsgifunc(),
                              config.get('log', 'access'))
    else:
        raise Exception('invaild engine %s' % engine)

    # Pick the serving backend.
    backend = config.get('server', 'server')
    if backend == 'gevent':
        from gevent.server import StreamServer
        srv = StreamServer(addr, app.handler)
    elif backend == 'thread':
        srv = ThreadServer(addr, app.handler)
    else:
        raise Exception('invaild server %s' % backend)

    try:
        srv.serve_forever()
    except KeyboardInterrupt:
        pass
class TCPIn(Actor):
    """Receive Events over TCP."""

    def __init__(self, name, port=None, host=None, *args, **kwargs):
        # Bug fix: was `super(TCPIn, self)._init__(...)` (missing leading
        # underscore), which raises AttributeError at construction time.
        super(TCPIn, self).__init__(name, *args, **kwargs)
        self.blockdiag_config["shape"] = "cloud"
        self.port = port or DEFAULT_PORT
        self.host = host or "0.0.0.0"
        self.server = StreamServer((self.host, self.port), self.connection_handler)

    def consume(self, event, *args, **kwargs):
        # Input-only actor: nothing to consume.
        pass

    def pre_hook(self):
        """Start listening when the actor comes up."""
        self.logger.info("Connecting to {0} on {1}".format(self.host, self.port))
        self.server.start()

    def post_hook(self):
        """Stop listening when the actor shuts down."""
        self.server.stop()

    def connection_handler(self, socket, address):
        """Read a whole pickled event from the connection and emit it."""
        event_string = ""
        for l in socket.makefile('r'):
            event_string += l
        try:
            # NOTE(review): pickle.loads on network data is unsafe with
            # untrusted peers -- confirm this only listens on trusted nets.
            event = pickle.loads(event_string)
            self.send_event(event)
        except Exception:
            self.logger.error("Received invalid event format: {0}".format(event_string))
def test_valid_login(self): """Tests if telnet server responds correctly to a VALID login attempt.""" # curses dependency in the telnetserver need a STDOUT with file descriptor. sys.stdout = tempfile.TemporaryFile() # initialize capability and start tcp server options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'max_attempts': 3}, 'users': {'test': 'test'}} cap = telnet.Telnet(options, self.work_dir) server = StreamServer(('0.0.0.0', 0), cap.handle_session) server.start() client = telnetlib.Telnet('localhost', server.server_port) # set this to 1 if having problems with this test client.set_debuglevel(0) # this disables all command negotiation. client.set_option_negotiation_callback(self.cb) #Expect username as first output reply = client.read_until('Username: '******'Username: '******'test' + '\r\n') reply = client.read_until('Password: '******'Password: '******'test' + '\r\n') reply = client.read_until('$ ') self.assertTrue(reply.endswith('$ ')) server.stop()
def test_command_echo(self):
    """SSH bait's 'echo' should round-trip its argument."""
    options = {'enabled': 'True', 'port': 0,
               'protocol_specific_data': {'max_attempts': 3},
               'users': {'test': 'test'}}
    cap = honeypot_ssh.SSH(options, self.work_dir, self.key)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    try:
        bait_info = {
            'timing': 'regular',
            'username': '******',
            'password': '******',
            'port': srv.server_port,
            'server': '127.0.0.1'
        }
        BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
        current_bee = client_ssh.Ssh(bait_info)
        current_bee.connect_login()
        resp = current_bee.echo('just testing!')
        self.assertTrue('just testing!' in resp)
    finally:
        # Bug fix: srv was never stopped (sibling test_logout calls srv.stop()).
        srv.stop()
def test_logout(self):
    """Tests if the SSH bait can Logout from the SSH capability"""
    options = {'enabled': 'True', 'port': 0,
               'protocol_specific_data': {'max_attempts': 3},
               'users': {'test': 'test'}}
    capability = honeypot_ssh.SSH(options, self.work_dir, self.key)
    server = StreamServer(('0.0.0.0', 0), capability.handle_session)
    server.start()

    BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
    bait_info = {'timing': 'regular',
                 'username': '******',
                 'password': '******',
                 'port': server.server_port,
                 'server': '127.0.0.1'}

    # Log in, then immediately log out again.
    ssh_client = client_ssh.Ssh(bait_info)
    ssh_client.connect_login()
    ssh_client.logout()
    server.stop()
def test_AUTH_CRAM_MD5_reject(self):
    """Makes sure the server rejects all login attempts that use the
    CRAM-MD5 Authentication method."""
    sessions = {}
    users = {}  # provide valid login/pass to authenticator
    authenticator = Authenticator(users)
    Session.authenticator = authenticator
    options = {'enabled': 'True', 'port': 0, 'banner': 'Test'}
    cap = smtp.smtp(sessions, options, users, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()

    def encode_cram_md5(challenge, user, password):
        # Fix: base64.decodestring was removed in Python 3.9;
        # b64decode behaves identically for this input.
        challenge = base64.b64decode(challenge)
        response = user + ' ' + hmac.HMAC(password, challenge).hexdigest()
        return base64.b64encode(response)

    smtp_ = smtplib.SMTP('127.0.0.1', srv.server_port,
                         local_hostname='localhost', timeout=15)
    _, resp = smtp_.docmd('AUTH', 'CRAM-MD5')
    code, resp = smtp_.docmd(encode_cram_md5(resp, 'test', 'test'))
    # For now, the server's going to return a 535 code.
    self.assertEqual(code, 535)
    srv.stop()
def server_loop(self):
    """Listen for datapath connections, over TLS when keys are configured."""
    if FLAGS.ctl_privkey and FLAGS.ctl_cert is not None:
        # Collect the SSL options once instead of duplicating the call.
        ssl_kwargs = dict(keyfile=FLAGS.ctl_privkey,
                          certfile=FLAGS.ctl_cert,
                          ssl_version=ssl.PROTOCOL_TLSv1)
        if FLAGS.ca_certs is not None:
            # Require and verify client certificates against the CA bundle.
            ssl_kwargs.update(cert_reqs=ssl.CERT_REQUIRED,
                              ca_certs=FLAGS.ca_certs)
        server = StreamServer((FLAGS.ofp_listen_host,
                               FLAGS.ofp_ssl_listen_port),
                              datapath_connection_factory, **ssl_kwargs)
    else:
        # Plain TCP listener.
        server = StreamServer((FLAGS.ofp_listen_host,
                               FLAGS.ofp_tcp_listen_port),
                              datapath_connection_factory)
    #LOG.debug('loop')
    server.serve_forever()
def test_login(self):
    """Testing different login combinations"""
    login_sequences = [
        # invalid login, invalid password
        (('USER wakkwakk', '+OK User accepted'),
         ('PASS wakkwakk', '-ERR Authentication failed.')),
        # PASS without user
        (('PASS bond', '-ERR No username given.'),),
        # Try to run a TRANSACITON state command in AUTHORIZATION state
        (('RETR', '-ERR Unknown command'),),
    ]
    options = {'port': 110,
               'protocol_specific_data': {'max_attempts': 3},
               'users': {'james': 'bond'}}
    pop3_capability = Pop3(options)
    server = StreamServer(('127.0.0.1', 0), pop3_capability.handle_session)
    server.start()

    for sequence in login_sequences:
        conn = gevent.socket.create_connection(
            ('127.0.0.1', server.server_port))
        reader = conn.makefile()
        # skip banner
        reader.readline()
        for command, expected in sequence:
            conn.sendall(command + "\r\n")
            self.assertEqual(reader.readline().rstrip(), expected)

    server.stop()
def run(self):
    """Worker main loop for multiple listening sockets: serve until the
    master dies or we are stopped, then drain gracefully within
    cfg.graceful_timeout before force-stopping."""
    servers = []
    ssl_args = {}

    if self.cfg.is_ssl:
        ssl_args = dict(server_side=True, do_handshake_on_connect=False,
                        **self.cfg.ssl_options)

    for s in self.sockets:
        s.setblocking(1)
        pool = Pool(self.worker_connections)
        if self.server_class is not None:
            server = self.server_class(
                s, application=self.wsgi, spawn=pool, log=self.log,
                handler_class=self.wsgi_handler, **ssl_args)
        else:
            # Bind the socket into the handler so it knows where it came from.
            hfun = partial(self.handle, s)
            server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)

        server.start()
        servers.append(server)

    pid = os.getpid()
    try:
        while self.alive:
            self.notify()
            # Only the original process checks for re-parenting; a changed
            # ppid means the master is gone.
            if pid == os.getpid() and self.ppid != os.getppid():
                self.log.info("Parent changed, shutting down: %s", self)
                break
            gevent.sleep(1.0)
    except KeyboardInterrupt:
        pass

    try:
        # Stop accepting requests
        [server.stop_accepting() for server in servers]

        # Handle current requests until graceful_timeout
        ts = time.time()
        while time.time() - ts <= self.cfg.graceful_timeout:
            accepting = 0
            for server in servers:
                # A non-full pool means the server still has live handlers.
                if server.pool.free_count() != server.pool.size:
                    accepting += 1

            # if no server is accepting a connection, we can exit
            if not accepting:
                return

            self.notify()
            gevent.sleep(1.0)

        # Force kill all active the handlers
        self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
        [server.stop(timeout=1) for server in servers]
    except:
        # Deliberate best-effort shutdown: never let errors escape here.
        pass
def _run_all_tests(self):
    """Start a logging echo server, interrupt the child process mid-stream,
    and verify both messages still arrived."""
    log = []

    def handle(socket, address):
        # Accumulate everything the client sends until EOF.
        while True:
            data = socket.recv(1024)
            print('got %r' % data)
            if not data:
                break
            log.append(data)

    server = StreamServer(self.args[1], handle)
    server.start()
    try:
        # NOTE(review): hard-coded port 10011 -- presumably matches
        # self.args[1]; confirm against the harness setup.
        conn = socket.create_connection(('127.0.0.1', 10011))
        conn.sendall(b'msg1')
        sleep(0.1)
        # On Windows, SIGTERM actually abruptly terminates the process;
        # it can't be caught. However, CTRL_C_EVENT results in a KeyboardInterrupt
        # being raised, so we can shut down properly.
        self.popen.send_signal(
            getattr(signal, 'CTRL_C_EVENT')
            if hasattr(signal, 'CTRL_C_EVENT')
            else signal.SIGTERM)
        sleep(0.1)
        conn.sendall(b'msg2')
        conn.close()
        with gevent.Timeout(2.1):
            self.popen.wait()
    finally:
        server.close()

    self.assertEqual([b'msg1', b'msg2'], log)
def test_login(self):
    """Tests if the SMTP bait can login to the SMTP capability"""
    sessions = {}
    options = {'enabled': 'True', 'port': 0,
               'protocol_specific_data': {'banner': 'Test'},
               'users': {'test': 'test'}}
    capability = hive_smtp.smtp(sessions, options, self.work_dir)
    server = StreamServer(('0.0.0.0', 0), capability.handle_session)
    server.start()

    BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
    bee_info = {'timing': 'regular',
                'username': '******',
                'password': '******',
                'port': server.server_port,
                'server': '127.0.0.1',
                'local_hostname': 'testhost',
                'honeypot_id': '1234'}

    # Connect and authenticate against the capability just started.
    smtp_bait = bee_smtp.smtp({}, bee_info)
    smtp_bait.connect()
    smtp_bait.login(bee_info['username'], bee_info['password'])
    server.stop()
def test_login(self):
    """Tests if the POP3 bee can login to the POP3 capability"""
    sessions = {}
    users = {"test": BaitUser("test", "test")}
    Session.authenticator = Authenticator(users)

    options = {"enabled": "True", "port": 0, "max_attempts": 3}
    capability = honeypot_pop3.Pop3(sessions, options, users, self.work_dir)
    server = StreamServer(("0.0.0.0", 0), capability.handle_session)
    server.start()

    BaitSession.client_id = "f51171df-c8f6-4af4-86c0-f4e163cf69e8"
    bait_info = {"timing": "regular",
                 "username": "******",
                 "password": "******",
                 "port": server.server_port,
                 "server": "127.0.0.1"}

    # Run one full bee session against the capability.
    pop3_bee = client_pop3.pop3({}, bait_info)
    pop3_bee.do_session("127.0.0.1")
    server.stop()
def test_login(self):
    """Tests if the POP3 bait can login to the POP3 capability"""
    sessions = {}
    options = {'enabled': 'True', 'port': 0,
               'protocol_specific_data': {'max_attempts': 3},
               'users': {'test': 'test'}}
    capability = honeypot_pop3.Pop3(sessions, options, self.work_dir)
    server = StreamServer(('0.0.0.0', 0), capability.handle_session)
    server.start()

    BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
    bait_info = {'timing': 'regular',
                 'username': '******',
                 'password': '******',
                 'port': server.server_port,
                 'server': '127.0.0.1',
                 'honeypot_id': '1234'}

    # Kick off the bait session against the capability.
    pop3_bait = client_pop3.pop3({}, bait_info)
    pop3_bait.start()
    server.stop()
def __init__(self, host, port):
    """Resolve *host*, record the default behavior, and bind the server."""
    self.behavior = Dummy()
    self.behavior_name = 'dummy'
    # Resolve once up front so the bound address is a plain IP.
    self.host = gethostbyname(host)
    self.port = port
    StreamServer.__init__(self, (self.host, self.port))
def run(self):
    """Serve the application until interrupted with Ctrl-C."""
    stream_server = StreamServer(self.addr, self.app)
    try:
        self.logger.info('Maria System Start at %s:%s' % self.addr)
        stream_server.serve_forever()
    except KeyboardInterrupt:
        self.logger.info('Maria System Stopped')
def __init__(self, listener, application=None, backlog=None, spawn='default',
             log='default', error_log='default', handler_class=None,
             environ=None, **ssl_args):
    """WSGI server initializer.

    log/error_log accept 'default' (stderr), None (a no-op log), a
    file-like object, or an object with a .log method (wrapped in
    LoggingLogAdapter).  Remaining keyword args go to StreamServer.
    """
    StreamServer.__init__(self, listener, backlog=backlog, spawn=spawn, **ssl_args)
    if application is not None:
        self.application = application
    if handler_class is not None:
        self.handler_class = handler_class

    # Note that we can't initialize these as class variables:
    # sys.stderr might get monkey patched at runtime.
    def _make_log(l, level=20):
        if l == 'default':
            return sys.stderr
        if l is None:
            return _NoopLog()
        # Logger-like (has .log but no .write): adapt it to a stream.
        if not hasattr(l, 'write') and hasattr(l, 'log'):
            return LoggingLogAdapter(l, level)
        return l
    self.log = _make_log(log)
    self.error_log = _make_log(error_log, 40)  # logging.ERROR

    self.set_environ(environ)
    self.set_max_accept()
def test_list(self):
    """Tests the FTP LIST command"""
    options = {'enabled': 'True', 'port': 0, 'banner': 'Test Banner',
               'protocol_specific_data': {'max_attempts': 3,
                                          'banner': 'test banner',
                                          'syst_type': 'Test Type'},
               'users': {'test': 'test'}}
    cap = honeypot_ftp.ftp(options, self.work_dir)
    srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
    srv.start()
    try:
        bee_info = {
            'enabled': True,
            'timing': 'regular',
            'username': '******',
            'password': '******',
            'port': srv.server_port,
            'server': '127.0.0.1'
        }
        # Removed unused local `beesessions` (never passed anywhere).
        BaitSession.client_id = 'f51171df-c8f6-4af4-86c0-f4e163cf69e8'
        current_bee = bee_ftp.Ftp(bee_info)
        current_bee.connect()
        current_bee.login(bee_info['username'], bee_info['password'])
        current_bee.list()
        # LIST must have populated both file and directory listings.
        self.assertGreater(len(current_bee.state['file_list']), 0)
        self.assertGreater(len(current_bee.state['dir_list']), 0)
    finally:
        srv.stop()
class ConnectionServer:
    """TCP connection server with a legacy ZeroMQ side-channel.

    Tracks open connections by ip and peer id and generates a
    BitTorrent-style peer id for this node.
    """

    def __init__(self, ip=None, port=None, request_handler=None):
        self.ip = ip
        self.port = port
        self.last_connection_id = 1  # Connection id incrementer
        self.log = logging.getLogger("ConnServer")
        self.connections = []  # Connections
        self.ips = {}  # Connection by ip
        self.peer_ids = {}  # Connections by peer_ids
        self.running = True
        self.thread_checker = gevent.spawn(self.checkConnections)

        self.zmq_running = False
        self.zmq_last_connection = None  # Last incoming message client

        # Bittorrent style peerid
        self.peer_id = "-ZN0" + config.version.replace(".", "") + "-" + ''.join(
            random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
            for _ in range(12))

        if port:  # Listen server on a port
            self.zmq_port = port - 1
            self.pool = Pool(1000)  # do not accept more than 1000 connections
            self.stream_server = StreamServer(
                (ip.replace("*", ""), port), self.handleIncomingConnection,
                spawn=self.pool, backlog=100)
            if request_handler:
                self.handleRequest = request_handler
            gevent.spawn(self.zmqServer)  # Start ZeroMQ server for backward compatibility

    def start(self):
        self.running = True
        try:
            self.log.debug("Binding to: %s:%s (msgpack: %s)" %
                           (self.ip, self.port, ".".join(map(str, msgpack.version))))
            self.stream_server.serve_forever()  # Start normal connection server
        # Fix: `except Exception, err` is Python-2-only syntax; `as` works
        # on Python 2.6+ and Python 3.
        except Exception as err:
            self.log.info("StreamServer bind error, must be running already: %s" % err)
#!/usr/bin/env python
# -*- coding:UTF-8
from gevent import socket
from gevent.server import StreamServer


def handle_echo(sock, address):
    """Echo lines back to the client until EOF, then half-close and close."""
    stream = sock.makefile()
    line = stream.readline()
    while line:
        stream.write(line)
        stream.flush()
        line = stream.readline()
    # Client closed its side: signal we are done writing, then close.
    sock.shutdown(socket.SHUT_WR)
    sock.close()


server = StreamServer(('', 1234), handle_echo)
server.serve_forever()
from gevent.server import StreamServer


def handle(socket, address):
    """Shout the client's input back in upper case until it sends 'end'."""
    socket.send("Hello from a telnet!\n")
    data = socket.recv(20)
    while "end" not in data:
        socket.send("SERVER SAYS: " + data.upper())
        data = socket.recv(20)
    socket.send("bye!")
    socket.close()


server = StreamServer(('127.0.0.1', 5000), handle)
server.serve_forever()
def _run(self):
    """Listen for observer connections on all interfaces and serve forever."""
    logger.info("Observer Listen at port {0}".format(self.port))
    listener = StreamServer(('0.0.0.0', self.port), self._connection_handler)
    listener.serve_forever()
class Server(object):
    """UNIX-domain-socket server that dispatches one JSON request to an
    xcat manager, then (unless standalone) terminates the process."""

    def __init__(self, address, standalone):
        # Remove a stale socket file; re-raise only if it still exists.
        try:
            os.unlink(address)
        except OSError:
            if os.path.exists(address):
                raise
        self.address = address
        self.standalone = standalone
        self.server = StreamServer(self._serve(), self._handle)

    def _serve(self):
        """Create, bind and listen on the UNIX socket for StreamServer."""
        listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        listener.bind(self.address)
        listener.listen(1)
        return listener

    def _handle(self, sock, address):
        """Read one length-prefixed JSON request and run the requested
        manager command; exits the process when done (non-standalone)."""
        try:
            messager = Messager(sock)
            # 4-byte length prefix (decoded by utils.bytes2int), then payload.
            buf = sock.recv(4)
            sz = utils.bytes2int(buf)
            buf = utils.recv_all(sock, sz)
            req = json.loads(buf)
            if not 'command' in req:
                messager.error("Could not find command")
                return
            if not 'module' in req:
                messager.error("Please specify the request module")
                return
            if not 'cwd' in req:
                messager.error("Please specify the cwd parameter")
                return
            manager_func = xcat_manager.BaseManager.get_manager_func(
                req['module'])
            if manager_func is None:
                messager.error("Could not find manager for %s" % req['module'])
                return
            nodes = req.get("nodes", None)
            manager = manager_func(messager, req['cwd'], nodes, req['envs'])
            # NOTE(review): no `return` after this error -- control falls
            # through to getattr, which raises and lands in the except
            # below; confirm that is intended.
            if not hasattr(manager, req['command']):
                messager.error("command %s is not supported" % req['command'])
            func = getattr(manager, req['command'])
            # call the function in the specified manager
            func(req['nodeinfo'], req['args'])
            # after the method returns, the request should be handled
            # completely, close the socket for client
            if not self.standalone:
                sock.close()
                self.server.stop()
                os._exit(0)
        except Exception:
            print(traceback.format_exc(), file=sys.stderr)
            self.server.stop()
            os._exit(1)

    def keep_peer_alive(self):
        """Block a helper thread on the parent's lock file; acquiring it
        means the parent died, so shut this process down too."""
        def acquire():
            fd = open(LOCK_FILE, "r+")
            fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
            # if reach here, parent process may exit
            print("xcat process exit unexpectedly.", file=sys.stderr)
            self.server.stop()
            os._exit(1)
        t = threading.Thread(target=acquire)
        t.start()

    def start(self):
        # In managed (non-standalone) mode, watch the parent process.
        if not self.standalone:
            self.keep_peer_alive()
        self.server.serve_forever()
ciphers=ciphers, ssl_version=ssl.PROTOCOL_TLSv1) else: sock.sendall(data) except Exception, err: print err try: sock.shutdown(gevent.socket.SHUT_WR) sock.close() except: pass socks.remove(sock_raw) pool = Pool(1000) # do not accept more than 10000 connections server = StreamServer(('127.0.0.1', 1234), handle) server.start() # Client total_num = 0 total_bytes = 0 clipher = None ciphers = "ECDHE-ECDSA-AES128-GCM-SHA256:ECDH+AES128:ECDHE-RSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:HIGH:" + \ "!aNULL:!eNULL:!EXPORT:!DSS:!DES:!RC4:!3DES:!MD5:!PSK" # ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) def getData(): global total_num, total_bytes, clipher
#decode and get user auth_key = urllib.unquote(auth_key).strip() user = User.objects.get( pk=decode_signed_value(config.SERVER_SECRET, "auth_key", auth_key)) for handler in request_handlers: args = handler[0].match(request_path) func = handler[1] kwargs = {"user": user} if (post_data != None): kwargs["post"] = post_data if (args != None): fargs = args.groups() if (fargs): func(socket, *fargs, **kwargs) else: func(socket, **kwargs) if __name__ == "__main__": initDb() #initialize reading and file decoder #keep reading streams , auto reconnecting init_main_audio_streams() init_stream_event_handlers() server = StreamServer(('', 8888), handle_connection) server.serve_forever()
def __init__(self):
    """Bind the unix listener and wrap it in a StreamServer."""
    super(MessageHandler, self).__init__()
    listener = self.bind_unix_listener()
    self.server = StreamServer(listener, self.read_socket)
def __init__(self, listener, handle, spawn='default', worker=None):
    """Wrap StreamServer, keeping the user handler and owning worker.

    listener: address or socket for StreamServer.
    handle: per-connection callable, stored separately so it does not
        shadow StreamServer's own handle machinery.
    spawn: greenlet spawn strategy forwarded to StreamServer.
    worker: owning worker object, if any.
    """
    StreamServer.__init__(self, listener, spawn=spawn)
    self.handle_func = handle
    self.worker = worker
def __init__(self, address):
    # NOTE(review): serve_forever() blocks, so this constructor never
    # returns and the StreamServer instance is never stored -- confirm
    # this fire-and-block behavior is intended.
    StreamServer(address, self.handle).serve_forever()
class Service(object):
    """Base class for an RPC service: sets up per-service logging, an RPC
    StreamServer at the configured address, and an optional UNIX-socket
    backdoor."""

    def __init__(self, shard=0):
        # Turn Ctrl-C into a clean exit() call.
        signal.signal(signal.SIGINT, lambda unused_x, unused_y: self.exit())
        self.name = self.__class__.__name__
        self.shard = shard
        self._my_coord = ServiceCoord(self.name, self.shard)

        # Dictionaries of (to be) connected RemoteServiceClients.
        self.remote_services = {}

        self.initialize_logging()

        # We setup the listening address for services which want to
        # connect with us.
        try:
            address = get_service_address(self._my_coord)
        except KeyError:
            raise ConfigError("Unable to find address for service %r. "
                              "Is it specified in core_services in cms.conf?"
                              % (self._my_coord, ))
        self.rpc_server = StreamServer(address, self._connection_handler)
        self.backdoor = None

    def initialize_logging(self):
        """Set up additional logging handlers.

        What we do, in detail, is to add a logger to file (whose filename
        depends on the coords) and a remote logger to a LogService. We also
        attach the service coords to all log messages.
        """
        filter_ = ServiceFilter(self.name, self.shard)

        # Update shell handler to attach service coords.
        shell_handler.addFilter(filter_)

        # Determine location of log file, and make directories.
        log_dir = os.path.join(config.log_dir,
                               "%s-%d" % (self.name, self.shard))
        mkdir(config.log_dir)
        mkdir(log_dir)
        log_filename = time.strftime("%Y-%m-%d-%H-%M-%S.log")

        # Install a file handler.
        file_handler = FileHandler(os.path.join(log_dir, log_filename),
                                   mode='w', encoding='utf-8')
        if config.file_log_debug:
            file_log_level = logging.DEBUG
        else:
            file_log_level = logging.INFO
        file_handler.setLevel(file_log_level)
        file_handler.setFormatter(DetailedFormatter(False))
        file_handler.addFilter(filter_)
        root_logger.addHandler(file_handler)

        # Provide a symlink to the latest log file.
        try:
            os.remove(os.path.join(log_dir, "last.log"))
        except OSError:
            pass
        os.symlink(log_filename, os.path.join(log_dir, "last.log"))

        # Setup a remote LogService handler (except when we already are
        # LogService, to avoid circular logging).
        if self.name != "LogService":
            log_service = self.connect_to(ServiceCoord("LogService", 0))
            remote_handler = LogServiceHandler(log_service)
            remote_handler.setLevel(logging.INFO)
            remote_handler.addFilter(filter_)
            root_logger.addHandler(remote_handler)

    def _connection_handler(self, sock, address):
        """Receive and act upon an incoming connection.

        A new RemoteServiceServer is spawned to take care of the new
        connection.
        """
        try:
            ipaddr, port = address
            ipaddr = gevent.socket.gethostbyname(ipaddr)
            address = Address(ipaddr, port)
        except socket.error:
            logger.warning("Unexpected error.", exc_info=True)
            return
        remote_service = RemoteServiceServer(self, address)
        remote_service.handle(sock)

    def connect_to(self, coord, on_connect=None, on_disconnect=None,
                   must_be_present=True):
        """Return a proxy to a remote service.

        Obtain a communication channel to the remote service at the
        given coord (reusing an existing one, if possible), attach the
        on_connect and on_disconnect handlers and return it.

        coord (ServiceCoord): the coord of the service to connect to.
        on_connect (function|None): to be called when the service
            connects.
        on_disconnect (function|None): to be called when it
            disconnects.
        must_be_present (bool): if True, the coord must be present in
            the configuration; otherwise, it can be missing and in
            that case the return value is a fake client (that is, a
            client that never connects and ignores all calls).

        return (RemoteServiceClient): a proxy to that service.
        """
        if coord not in self.remote_services:
            try:
                service = RemoteServiceClient(coord, auto_retry=0.5)
            except KeyError:
                # The coordinates are invalid: raise a ConfigError if
                # the service was needed, or return a dummy client if
                # the service was optional.
                if must_be_present:
                    raise ConfigError("Missing address and port for %s "
                                      "in cms.conf." % (coord, ))
                else:
                    service = FakeRemoteServiceClient(coord, None)
            service.connect()
            self.remote_services[coord] = service
        else:
            service = self.remote_services[coord]

        if on_connect is not None:
            service.add_on_connect_handler(on_connect)

        if on_disconnect is not None:
            service.add_on_disconnect_handler(on_disconnect)

        return service

    def add_timeout(self, func, plus, seconds, immediately=False):
        """Register a function to be called repeatedly.

        func (function): the function to call.
        plus (object): additional data to pass to the function.
        seconds (float): the minimum interval between successive
            calls (may be larger if a call doesn't return on time).
        immediately (bool): whether to call right off or wait also
            before the first call.
        """
        if plus is None:
            plus = {}
        func = functools.partial(func, **plus)
        if immediately:
            gevent.spawn(repeater, func, seconds)
        else:
            gevent.spawn_later(seconds, repeater, func, seconds)

    def exit(self):
        """Terminate the service at the next step.

        """
        logger.warning("%r received request to shut down.", self._my_coord)
        self.rpc_server.stop()

    def get_backdoor_path(self):
        """Return the path for a UNIX domain socket to use as backdoor.

        """
        return os.path.join(config.run_dir,
                            "%s_%d" % (self.name, self.shard))

    @rpc_method
    def start_backdoor(self, backlog=50):
        """Start a backdoor server on a local UNIX domain socket.

        """
        backdoor_path = self.get_backdoor_path()
        try:
            os.remove(backdoor_path)
        except OSError as error:
            if error.errno != errno.ENOENT:
                raise
        else:
            logger.warning("A backdoor socket has been found and deleted.")
        mkdir(os.path.dirname(backdoor_path))
        backdoor_sock = gevent.socket.socket(socket.AF_UNIX,
                                             socket.SOCK_STREAM)
        backdoor_sock.setblocking(0)
        backdoor_sock.bind(backdoor_path)
        user = pwd.getpwnam(config.cmsuser)
        # We would like to also set the user to "cmsuser" but only root
        # can do that. Therefore we limit ourselves to the group.
        os.chown(backdoor_path, os.getuid(), user.pw_gid)
        os.chmod(backdoor_path, 0o770)
        backdoor_sock.listen(backlog)
        self.backdoor = BackdoorServer(backdoor_sock,
                                       locals={'service': self})
        self.backdoor.start()

    @rpc_method
    def stop_backdoor(self):
        """Stop a backdoor server started by start_backdoor.

        """
        if self.backdoor is not None:
            self.backdoor.stop()
        backdoor_path = self.get_backdoor_path()
        try:
            os.remove(backdoor_path)
        except OSError as error:
            if error.errno != errno.ENOENT:
                raise

    def run(self):
        """Starts the main loop of the service.

        return (bool): True if successful.
        """
        try:
            self.rpc_server.start()
        # This must come before socket.error, because socket.gaierror
        # extends socket.error
        except socket.gaierror:
            logger.critical("Service %s could not listen on "
                            "specified address, because it cannot "
                            "be resolved.", self.name)
            return False
        except socket.error as error:
            if error.errno == errno.EADDRINUSE:
                logger.critical("Listening port %s for service %s is "
                                "already in use, quitting.",
                                self.rpc_server.address.port, self.name)
                return False
            elif error.errno == errno.EADDRNOTAVAIL:
                logger.critical("Service %s could not listen on "
                                "specified address, because it is not "
                                "available.", self.name)
                return False
            else:
                raise

        if config.backdoor:
            self.start_backdoor()

        logger.info("%s %d up and running!", *self._my_coord)

        # This call will block until self.rpc_server.stop() is called.
        self.rpc_server.serve_forever()

        logger.info("%s %d is shutting down", *self._my_coord)

        if config.backdoor:
            self.stop_backdoor()

        self._disconnect_all()

        return True

    def _disconnect_all(self):
        """Disconnect all remote services.

        """
        for service in itervalues(self.remote_services):
            if service.connected:
                service.disconnect()

    @rpc_method
    def echo(self, string):
        """Simple RPC method.

        string (string): the string to be echoed.

        return (string): string, again.
        """
        return string

    @rpc_method
    def quit(self, reason=""):
        """Shut down the service

        reason (string): why, oh why, you want me down?
        """
        logger.info("Trying to exit as asked by another service (%s).",
                    reason)
        self.exit()
def server():
    """Listen on localhost:PORT and dispatch each connection to ``serve``.

    Blocks forever: ``serve_forever()`` only returns once the server is
    stopped from elsewhere.
    """
    # Fix: the original bound serve_forever()'s return value (always None)
    # to an unused local `ss`, which misleadingly suggested the server
    # object was kept; just run the server.
    StreamServer(('localhost', PORT), serve).serve_forever()
class TestBase(unittest.TestCase):
    """Integration tests for the conpot Modbus honeypot served over a real
    local TCP socket (gevent StreamServer on an ephemeral port)."""

    def setUp(self):
        # clean up before we start...
        conpot_core.get_sessionManager().purge_sessions()
        self.databus = conpot_core.get_databus()
        self.databus.initialize('conpot/templates/default.xml')
        modbus = modbus_server.ModbusServer('conpot/templates/default.xml',
                                            timeout=2)
        # Port 0: let the OS pick a free port; tests read server_port back.
        self.modbus_server = StreamServer(('127.0.0.1', 0), modbus.handle)
        self.modbus_server.start()

    def tearDown(self):
        self.modbus_server.stop()
        # tidy up (again)...
        conpot_core.get_sessionManager().purge_sessions()

    def test_read_coils(self):
        """
        Objective: Test if we can extract the expected bits from a slave using the modbus protocol.
        """
        self.databus.set_value('memoryModbusSlave1BlockA',
                               [1 for b in range(0, 128)])
        master = modbus_tcp.TcpMaster(host='127.0.0.1',
                                      port=self.modbus_server.server_port)
        master.set_timeout(1.0)
        actual_bits = master.execute(slave=1, function_code=cst.READ_COILS,
                                     starting_address=1, quantity_of_x=128)
        # the test template sets all bits to 1 in the range 1-128
        expected_bits = [1 for b in range(0, 128)]
        self.assertSequenceEqual(actual_bits, expected_bits)

    def test_write_read_coils(self):
        """
        Objective: Test if we can change values using the modbus protocol.
        """
        master = modbus_tcp.TcpMaster(host='127.0.0.1',
                                      port=self.modbus_server.server_port)
        master.set_timeout(1.0)
        set_bits = [1, 0, 0, 1, 0, 0, 1, 1]
        # write 8 bits
        master.execute(1, cst.WRITE_MULTIPLE_COILS, 1, output_value=set_bits)
        # read 8 bit
        actual_bit = master.execute(slave=1, function_code=cst.READ_COILS,
                                    starting_address=1, quantity_of_x=8)
        self.assertSequenceEqual(set_bits, actual_bit)

    def test_read_nonexistent_slave(self):
        """
        Objective: Test if the correct exception is raised when trying to read from nonexistent slave.
        """
        master = modbus_tcp.TcpMaster(host='127.0.0.1',
                                      port=self.modbus_server.server_port)
        master.set_timeout(1.0)
        with self.assertRaises(ModbusError) as cm:
            master.execute(slave=5, function_code=cst.READ_COILS,
                           starting_address=1, quantity_of_x=1)
        self.assertEqual(cm.exception.get_exception_code(),
                         cst.SLAVE_DEVICE_FAILURE)

    def test_modbus_logging(self):
        """
        Objective: Test if modbus generates log messages as expected.
        Expected output is a dictionary with the following structure:
        {'timestamp': datetime.datetime(2013, 4, 23, 18, 47, 38, 532960),
         'remote': ('127.0.0.1', 60991),
         'data_type': 'modbus',
         'id': '01bd90d6-76f4-43cb-874f-5c8f254367f5',
         'data': {'function_code': 1, 'slave_id': 1,
                  'request': '0100010080',
                  'response': '0110ffffffffffffffffffffffffffffffff'}}
        """
        self.databus.set_value('memoryModbusSlave1BlockA',
                               [1 for b in range(0, 128)])
        master = modbus_tcp.TcpMaster(host='127.0.0.1',
                                      port=self.modbus_server.server_port)
        master.set_timeout(1.0)
        # issue request to modbus server
        master.execute(slave=1, function_code=cst.READ_COILS,
                       starting_address=1, quantity_of_x=128)
        # extract the generated logentry
        log_queue = conpot_core.get_sessionManager().log_queue
        log_item = log_queue.get(True, 2)
        self.assertIsInstance(log_item['timestamp'], datetime)
        self.assertTrue('data' in log_item)
        # we expect session_id to be 36 characters long (32 x char, 4 x dashes)
        # Bug fix: the original `assertTrue(len(...), msg)` passed for ANY
        # non-empty id; assert the documented length instead.
        self.assertEqual(36, len(str(log_item['id'])), log_item)
        self.assertEqual('127.0.0.1', log_item['remote'][0])
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual('modbus', log_item['data_type'])
        # testing the actual modbus data
        expected_payload = {
            'function_code': 1,
            'slave_id': 1,
            'request': '000100000006010100010080',
            'response': '0110ffffffffffffffffffffffffffffffff'
        }
        self.assertDictEqual(expected_payload, log_item['data'])
def __init__(self, listener, application=None, backlog=None,
             spawn='default', flow=BaseFlow, **ssl_args):
    """Initialize the underlying StreamServer and attach the flow class.

    ``application`` is accepted for interface compatibility but is not
    stored by this initializer.
    """
    base_init = StreamServer.__init__
    base_init(self, listener, backlog=backlog, spawn=spawn, **ssl_args)
    self.flow = flow
    self.getLog()
def run(self):
    """Main loop of the gevent worker.

    Builds one listening server per configured socket, heartbeats to the
    master until ``self.alive`` is cleared, then winds the servers down
    within ``cfg.graceful_timeout`` seconds.
    """
    servers = []
    ssl_args = {}
    if self.cfg.is_ssl:
        # Handshake lazily on first I/O so accept() never blocks on TLS.
        ssl_args = dict(server_side=True, do_handshake_on_connect=False,
                        **self.cfg.ssl_options)
    for s in self.sockets:
        s.setblocking(1)
        # NOTE(review): a fresh Pool is created per socket, so
        # worker_connections bounds each listener separately rather than
        # the worker as a whole -- confirm this is intended.
        pool = Pool(self.worker_connections)
        if self.server_class is not None:
            server = self.server_class(s, application=self.wsgi, spawn=pool,
                                       log=self.log,
                                       handler_class=self.wsgi_handler,
                                       **ssl_args)
        else:
            hfun = partial(self.handle, s)
            server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)
        server.start()
        servers.append(server)
    try:
        # Heartbeat to the master once a second while alive.
        while self.alive:
            self.notify()
            gevent.sleep(1.0)
    except KeyboardInterrupt:
        pass
    except:
        # Unexpected failure: best-effort stop, then re-raise.
        # NOTE(review): only the most recently created server is stopped
        # on this path; the graceful path below stops all of them.
        try:
            server.stop()
        except:
            pass
        raise
    try:
        # Stop accepting requests
        for server in servers:
            if hasattr(server, 'close'):  # gevent 1.0
                server.close()
            if hasattr(server, 'kill'):  # gevent < 1.0
                server.kill()
        # Handle current requests until graceful_timeout
        ts = time.time()
        while time.time() - ts <= self.cfg.graceful_timeout:
            accepting = 0
            for server in servers:
                # A server still has in-flight work while its pool is
                # not fully free.
                if server.pool.free_count() != server.pool.size:
                    accepting += 1
            # if no server is accepting a connection, we can exit
            if not accepting:
                return
            self.notify()
            gevent.sleep(1.0)
        # Force kill all active the handlers
        self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
        [server.stop(timeout=1) for server in servers]
    except:
        pass
def __init__(self, handler):
    """Wrap *handler* behind a loopback StreamServer on an ephemeral port.

    The instance itself serves as the connection callback, and
    ``result`` collects the outcome asynchronously.
    """
    self.handler = handler
    self.result = AsyncResult()
    loopback = ('127.0.0.1', 0)  # port 0: the OS picks a free port
    self.server = StreamServer(loopback, self)
def run(self):
    """Main loop of the websocket-capable gevent worker.

    Two code paths: gunicorn >= 0.17 exposes multiple sockets per
    worker; older versions expose a single ``self.socket``.
    """
    if gunicorn_version >= (0, 17, 0):
        servers = []
        ssl_args = {}
        if self.cfg.is_ssl:
            # Handshake lazily so accept() never blocks on TLS.
            ssl_args = dict(server_side=True, do_handshake_on_connect=False,
                            **self.cfg.ssl_options)
        for s in self.sockets:
            s.setblocking(1)
            pool = Pool(self.worker_connections)
            if self.server_class is not None:
                self.server_class.base_env['wsgi.multiprocess'] = \
                    self.cfg.workers > 1
                server = self.server_class(
                    s, application=self.wsgi, spawn=pool,
                    resource=self.resource, log=self.log,
                    policy_server=self.policy_server,
                    handler_class=self.wsgi_handler,
                    ws_handler_class=self.ws_wsgi_handler, **ssl_args)
            else:
                hfun = partial(self.handle, s)
                server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)
            server.start()
            servers.append(server)
        pid = os.getpid()
        try:
            while self.alive:
                self.notify()
                # A changed parent pid means the master died; shut down.
                if pid == os.getpid() and self.ppid != os.getppid():
                    self.log.info("Parent changed, shutting down: %s", self)
                    break
                gevent.sleep(1.0)
        except KeyboardInterrupt:
            pass
        try:
            # Stop accepting requests
            [server.stop_accepting() for server in servers]
            # Handle current requests until graceful_timeout
            ts = time.time()
            while time.time() - ts <= self.cfg.graceful_timeout:
                accepting = 0
                for server in servers:
                    # NOTE(review): `==` counts a server as "accepting"
                    # when its pool is FULLY FREE (idle); the analogous
                    # worker above uses `!=` (busy).  Looks inverted --
                    # confirm which predicate is intended.
                    if server.pool.free_count() == server.pool.size:
                        accepting += 1
                if not accepting:
                    return
                self.notify()
                gevent.sleep(1.0)
            # Force kill all active the handlers
            self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
            # NOTE(review): only the last server created in the loop is
            # stopped here; the others are left running.
            server.stop(timeout=1)
        except:
            pass
    else:
        # Pre-0.17 gunicorn: single listening socket per worker.
        self.socket.setblocking(1)
        pool = Pool(self.worker_connections)
        self.server_class.base_env['wsgi.multiprocess'] = \
            self.cfg.workers > 1
        server = self.server_class(
            self.socket, application=self.wsgi, spawn=pool,
            resource=self.resource, log=self.log,
            policy_server=self.policy_server,
            handler_class=self.wsgi_handler,
            ws_handler_class=self.ws_wsgi_handler,
        )
        server.start()
        pid = os.getpid()
        try:
            while self.alive:
                self.notify()
                if pid == os.getpid() and self.ppid != os.getppid():
                    self.log.info("Parent changed, shutting down: %s", self)
                    break
                gevent.sleep(1.0)
        except KeyboardInterrupt:
            pass
        try:
            # Stop accepting requests
            server.kill()
            # Handle current requests until graceful_timeout
            ts = time.time()
            while time.time() - ts <= self.cfg.graceful_timeout:
                if server.pool.free_count() == server.pool.size:
                    return  # all requests was handled
                self.notify()
                gevent.sleep(1.0)
            # Force kill all active the handlers
            self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
            server.stop(timeout=1)
        except:
            pass
st_time = right_now count += 1 return "foo" channels = [] def foo(sock): gevent.socket.wait_read(sock.fileno()) print sock.recv(1024 * 1024) def connect_handler(sock, address): print "got connection", address #gevent.spawn(foo, sock) #foo(sock) # global channels ch = mbus.ServerChannel(sock, func) ch.start() #channels.append(ch) if __name__ == '__main__': s = StreamServer(('localhost', 9090), connect_handler) s.serve_forever()
def Run(self):
    """Create the loopback TCP server and block serving requests."""
    print("Starting server")
    bind_addr = ('127.0.0.1', self.port)
    self.server = StreamServer(bind_addr, self.handle)
    self.server.serve_forever()
def close(self):
    """Close the listener; a repeated close request aborts the process."""
    if self.closed:
        # Second shutdown signal: bail out hard.  sys.exit() raises
        # SystemExit, so nothing below runs on this path.
        sys.exit('Multiple exit signals received - aborting.')
    log('Closing listener socket')
    StreamServer.close(self)
class Server(object):
    """A minimal Redis-like in-memory key/value TCP server.

    Connections are handled cooperatively by a gevent StreamServer,
    capped at ``max_clients`` concurrent handlers.
    """

    def __init__(self, host='127.0.0.1', port=31337, max_clients=64):
        self._pool = Pool(max_clients)
        self._server = StreamServer(
            (host, port),
            self.connection_handler,
            spawn=self._pool)
        self._protocol = ProtocolHandler()
        self._kv = {}  # the in-memory key/value store
        self._commands = self.get_commands()

    def get_commands(self):
        """Map wire command names to their handler methods."""
        return {
            'GET': self.get,
            'SET': self.set,
            'DELETE': self.delete,
            'FLUSH': self.flush,
            'MGET': self.mget,
            'MSET': self.mset}

    def connection_handler(self, conn, address):
        """Serve one client until it disconnects."""
        logger.info('Connection received: %s:%s' % address)
        # Convert "conn" (a socket object) into a file-like object.
        socket_file = conn.makefile('rwb')
        # Process client requests until client disconnects.
        while True:
            try:
                data = self._protocol.handle_request(socket_file)
            except Disconnect:
                logger.info('Client went away: %s:%s' % address)
                break
            try:
                resp = self.get_response(data)
            except CommandError as exc:
                logger.exception('Command error')
                resp = Error(exc.args[0])
            self._protocol.write_response(socket_file, resp)

    def run(self):
        """Block forever serving clients."""
        self._server.serve_forever()

    def get_response(self, data):
        """Dispatch a parsed request to the matching command handler.

        ``data`` is either a list (command plus arguments) or a simple
        string that is split on whitespace.

        Raises:
            CommandError: for malformed requests or unknown commands.
        """
        if not isinstance(data, list):
            try:
                data = data.split()
            except AttributeError:
                # Narrowed from a bare ``except``: only a non-string
                # (un-splittable) payload is a malformed request.
                raise CommandError('Request must be list or simple string.')
        if not data:
            raise CommandError('Missing command')

        command = data[0].upper()
        if command not in self._commands:
            raise CommandError('Unrecognized command: %s' % command)
        else:
            logger.debug('Received %s', command)

        return self._commands[command](*data[1:])

    def get(self, key):
        """Return the value stored under *key*, or None if absent."""
        return self._kv.get(key)

    def set(self, key, value):
        """Store *value* under *key*; always reports 1 item written."""
        self._kv[key] = value
        return 1

    def delete(self, key):
        """Remove *key*; return 1 if it existed, 0 otherwise."""
        if key in self._kv:
            del self._kv[key]
            return 1
        return 0

    def flush(self):
        """Empty the store and return how many keys were dropped."""
        kvlen = len(self._kv)
        self._kv.clear()
        return kvlen

    def mget(self, *keys):
        """Return a list of values, with None for missing keys."""
        return [self._kv.get(key) for key in keys]

    def mset(self, *items):
        """Store alternating key/value arguments; return the pair count.

        Bug fix: ``zip()`` is a lazy iterator on Python 3, so the
        original ``len(data)`` raised TypeError (and the loop would
        have drained it); materialize the pairs first.
        """
        data = list(zip(items[::2], items[1::2]))
        for key, value in data:
            self._kv[key] = value
        return len(data)
def stop(self, timeout=None):
    """Shut down the stream listener, then the AServer side of this object.

    Order preserved from the original implementation.
    """
    stream_stop = StreamServer.stop
    stream_stop(self, timeout)
    AServer.stop(self)
import gevent


def handle(socket, address):
    """Keep-alive handler: ping the connected client every ``sleeptime``
    seconds forever.

    ``sleeptime`` is a module-level global assigned from argv in the
    __main__ block below.
    """
    while True:
        gevent.sleep(sleeptime)
        try:
            # Bug fix: send bytes, not str.  socket.send("ok") raises
            # TypeError on Python 3; b"ok" behaves identically on 2 and 3.
            socket.send(b"ok")
        except Exception as e:
            # Best-effort keep-alive: report the failure and keep going.
            print(e)


if __name__ == "__main__":
    import sys

    port = 80
    if len(sys.argv) > 2:
        # usage: prog <port> <sleeptime-seconds>
        port = int(sys.argv[1])
        sleeptime = int(sys.argv[2])
    else:
        print("需要两个参数!!")
        sys.exit(1)
    # default backlog is 256
    server = StreamServer(('0.0.0.0', port), handle, backlog=4096)
    server.serve_forever()
'MAIN' if g is MAIN else repr(g).decode('utf-8'), rec.msg % rec.args if isinstance(rec.msg, basestring) else repr((rec.msg, rec.args)), ) fmter = ServerLogFormatter() root = logging.getLogger() root.setLevel(getattr(logging, options.log.upper())) std = logging.StreamHandler(stream=sys.stdout) std.setFormatter(fmter) root.handlers = [] root.addHandler(std) if options.logfile: from logging.handlers import WatchedFileHandler filehdlr = WatchedFileHandler(options.logfile) filehdlr.setFormatter(fmter) root.addHandler(filehdlr) if not options.no_backdoor: from gevent.backdoor import BackdoorServer gevent.spawn(BackdoorServer(('127.0.0.1', options.backdoor_port)).serve_forever) from server.core import Client root.info('=' * 20 + settings.VERSION + '=' * 20) server = StreamServer(('0.0.0.0', options.port), Client.spawn, None) server.serve_forever()
def __init__(self, listener, dest, **kwargs):
    """A StreamServer that remembers the destination it forwards to."""
    base_init = StreamServer.__init__
    base_init(self, listener, **kwargs)
    self.dest = dest
def __init__(self, listener, queue, pool=None, hostname=None):
    """Build the edge listener on top of the queue-based base class.

    Falls back to gevent's 'default' spawn strategy when no (truthy)
    pool is supplied.
    """
    super(EdgeServer, self).__init__(queue, hostname)
    # Equivalent to `pool or 'default'`: an empty/falsy pool also
    # selects the default spawner.
    spawn = pool if pool else 'default'
    self.server = StreamServer(listener, self._handle, spawn=spawn)
def start(argv):
    """Parse command-line flags and run either the p3p server or client.

    Flags:
        -s/--server    run as server (default)
        -c/--client    run as client
        -a/--address   bind/connect address (default 127.0.0.1)
        -p/--port      TCP port (default 6060)

    Exits with status 1 on unparseable options.
    """
    try:
        # Bug fix: value-taking long options need a trailing '=' or
        # getopt treats them as flags ('--address x' would fail).
        opts, args = getopt.getopt(argv, 'sca:p:',
                                   ['server', 'client', 'address=', 'port='])
    except getopt.GetoptError as e:
        print('Error: %r' % e)
        sys.exit(1)

    server = True
    address = '127.0.0.1'
    port = 6060
    for o, a in opts:
        if o in ('-s', '--server'):
            server = True
        elif o in ('-c', '--client'):
            server = False
        elif o in ('-a', '--address'):
            address = a
        elif o in ('-p', '--port'):
            # Bug fix: option values are strings; the '%d' format and
            # the socket address below require an int.
            port = int(a)
        else:
            assert False, 'unhandled option'

    if server:
        server = StreamServer((address, port), p3p.server.serve)
        print('Starting server on %s:%d' % (address, port))
        server.serve_forever()
    else:
        print('%r' % p3p.client.ask((address, port), args))
def _run(self):
    """Listen on all interfaces at ``self.port`` and serve forever."""
    bind_addr = ('0.0.0.0', self.port)
    StreamServer(bind_addr, self.connection_handler).serve_forever()
def server_loop(self):
    """Run the OpenFlow listener configured via FLAGS; blocks forever."""
    listen_on = (FLAGS.ofp_listen_host, FLAGS.ofp_tcp_listen_port)
    ofp_server = StreamServer(listen_on, datapath_connection_factory)
    ofp_server.serve_forever()
class EndpointProcessor(Actor):
    """Actor that terminates TLS connections from HCP sensors.

    Handles enrollment/authorization of connecting sensors, relays their
    telemetry to the analytics intake, and services tasking requests
    from other actors.  NOTE(review): uses Python-2-only constructs
    (``str.decode('hex')``, indexing ``dict.values()``) -- not Python 3
    compatible as written.
    """

    def init(self, parameters, resources):
        """One-time actor setup: config, peer handles, TLS context, server."""
        self.isOpen = True
        self.nConnected = 0
        self.handlerPortStart = parameters.get('handler_port_start', 10000)
        self.handlerPortEnd = parameters.get('handler_port_end', 20000)
        self.bindAddress = parameters.get('handler_address', '0.0.0.0')
        self.bindInterface = parameters.get('handler_interface', None)
        self.sensorMaxQps = parameters.get('sensor_max_qps', 30)
        # Resolve the bind address: explicit interface wins; otherwise a
        # wildcard address is replaced by the first public interface's IPv4.
        if self.bindInterface is not None:
            ip4 = self.getIpv4ForIface(self.bindInterface)
            if ip4 is not None:
                self.bindAddress = ip4
        elif '0.0.0.0' == self.bindAddress:
            self.bindAddress = self.getIpv4ForIface(
                self.getPublicInterfaces()[0])
        self.r = rpcm(isHumanReadable=True)
        self.r.loadSymbols(Symbols.lookups)
        # Handles to peer actors this endpoint talks to.
        self.analyticsIntake = self.getActorHandle(resources['analytics'],
                                                   nRetries=3)
        self.enrollmentManager = self.getActorHandle(resources['enrollments'],
                                                     nRetries=3)
        self.stateChanges = self.getActorHandleGroup(resources['states'],
                                                     nRetries=3)
        self.sensorDir = self.getActorHandle(resources['sensordir'],
                                             nRetries=3)
        self.moduleManager = self.getActorHandle(resources['module_tasking'],
                                                 nRetries=3)
        self.hbsProfileManager = self.getActorHandle(resources['hbs_profiles'],
                                                     nRetries=3)
        self.deploymentManager = self.getActorHandle(resources['deployment'],
                                                     nRetries=3)
        self.tagging = self.getActorHandle(resources['tagging'], nRetries=3)
        self.privateKey = parameters.get('_priv_key', None)
        self.privateCert = parameters.get('_priv_cert', None)
        self.sslContext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        if self.privateKey is None or self.privateCert is None:
            # No key material in parameters: fetch it from the deployment
            # manager and stage it in temp files just long enough for
            # load_cert_chain (which requires file paths).
            resp = self.deploymentManager.request('get_c2_cert', {},
                                                  timeout=300)
            if resp.isSuccess:
                self.privateKey = resp.data['key']
                self.privateCert = resp.data['cert']
                tmpHandle, tmpPathKey = tempfile.mkstemp()
                with open(tmpPathKey, 'wb') as f:
                    f.write(self.privateKey)
                os.close(tmpHandle)
                tmpHandle, tmpPathCert = tempfile.mkstemp()
                with open(tmpPathCert, 'wb') as f:
                    f.write(self.privateCert)
                os.close(tmpHandle)
                self.log('got keys from deployment manager')
                self.sslContext.load_cert_chain(certfile=tmpPathCert,
                                                keyfile=tmpPathKey)
                os.unlink(tmpPathKey)
                os.unlink(tmpPathCert)
            else:
                raise Exception(
                    'no cert specified in parameters or through deployment manager'
                )
        else:
            self.log('got keys from disk')
            self.sslContext.load_cert_chain(certfile=self.privateCert,
                                            keyfile=self.privateKey)
        self.sslContext.set_ciphers('ECDHE-RSA-AES128-GCM-SHA256')
        # RPC endpoints exposed to other actors.
        self.handle('task', self.taskClient)
        self.handle('report', self.report)
        self.handle('add_tag', self.addTag)
        self.handle('del_tag', self.delTag)
        self.server = None
        # Pick a random listening port in the configured range; startServer
        # retries with new random ports until bind succeeds.
        self.serverPort = random.randint(self.handlerPortStart,
                                         self.handlerPortEnd)
        self.currentClients = {}  # sensor_id -> client context
        self.moduleHandlers = {
            HcpModuleId.HCP: self.handlerHcp,
            HcpModuleId.HBS: self.handlerHbs
        }
        self.processedCounter = 0
        self.startServer()

    def deinit(self):
        """Actor teardown: stop listening."""
        if self.server is not None:
            self.server.close()

    def drain(self):
        """Gracefully push connected sensors off this endpoint."""
        # Stop accepting new connections.
        if self.server is not None:
            self.server.close()
        # Ask all the clients to nicely disconnect.
        for aid, c in self.currentClients.items():
            try:
                c.sendFrame(HcpModuleId.HCP, (rSequence().addInt8(
                    Symbols.base.OPERATION, HcpOperations.DISCONNECT), ))
            except:
                pass
        # Wait for everyone to be out.
        while 0 != self.nConnected:
            self.log("still %d clients connected" % self.nConnected)
            self.sleep(5)

    def startServer(self):
        """(Re)start the listener, retrying random ports until bind works."""
        if self.server is not None:
            self.server.close()
        while True:
            try:
                self.server = StreamServer(
                    (self.bindAddress, self.serverPort),
                    self.handleNewClient)
                self.server.start()
                self.log('Starting server on port %s' % self.serverPort)
                break
            except:
                # Bind failed (e.g. port in use): try another random port.
                self.serverPort = random.randint(self.handlerPortStart,
                                                 self.handlerPortEnd)

    def getIpv4ForIface(self, iface):
        """Return the first IPv4 address of *iface*, or None."""
        ip = None
        try:
            ip = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
        except:
            ip = None
        return ip

    def getPublicInterfaces(self):
        """List interfaces that have a non-loopback IPv4 address."""
        interfaces = []
        for iface in netifaces.interfaces():
            ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
            for entry in ipv4s:
                addr = entry.get('addr')
                if not addr:
                    continue
                if not (iface.startswith('lo') or addr.startswith('127.')):
                    interfaces.append(iface)
                    break
        return interfaces

    #==========================================================================
    # Client Handling
    #==========================================================================
    def handleNewClient(self, socket, address):
        """Per-connection handler: validate/enroll the sensor, then pump
        its frames to the module handlers until it disconnects."""
        if not self.isOpen:
            return
        self.nConnected += 1
        aid = None
        tmpBytesReceived = 0
        bufferedOutput = None
        self.log('New connection from %s:%s' % address)
        try:
            c = _ClientContext(self, socket)
            # First frame must be the HCP headers identifying the sensor.
            moduleId, headers, _ = c.recvFrame(timeout=30.0)
            if HcpModuleId.HCP != moduleId:
                raise DisconnectException('Headers not from expected module')
            if headers is None:
                raise DisconnectException('Error deserializing headers')
            headers = headers[0]
            self.log('Headers decoded, validating connection')
            hostName = headers.get('base.HOST_NAME', None)
            internalIp = headers.get('base.IP_ADDRESS', None)
            hcpHash = headers.get('base.HASH', None)
            hcpCrashContext = headers.get('hcp.CRASH_CONTEXT', None)
            if hcpCrashContext is not None:
                self.zInc('cc_received')
            # Use the address in the client context since it was received from the
            # proxy headers and therefore is the correct original source.
            externalIp = c.address[0]
            c.hostName = hostName
            c.int_ip = internalIp
            c.ext_ip = externalIp
            aid = AgentId(headers['base.HCP_IDENT'])
            if aid.org_id is None or aid.ins_id is None or aid.platform is None or aid.architecture is None:
                aidInfo = str(aid)
                if 0 == len(aidInfo):
                    aidInfo = str(headers)
                raise DisconnectException('Invalid sensor id: %s' % aidInfo)
            if aid.sensor_id is None:
                # No sensor id yet: run the enrollment flow and push the
                # resulting identity + config down to the sensor.
                self.log('Sensor requires enrollment')
                resp = self.enrollmentManager.request('enroll', {
                    'aid': aid.asString(),
                    'public_ip': externalIp,
                    'internal_ip': internalIp,
                    'host_name': hostName
                }, timeout=30)
                if not resp.isSuccess or 'aid' not in resp.data or resp.data[
                        'aid'] is None:
                    raise DisconnectException(
                        'Sensor could not be enrolled, come back later')
                aid = AgentId(resp.data['aid'])
                enrollmentToken = resp.data['token']
                confBuffer = resp.data['conf']
                confBufferSig = resp.data['conf_sig']
                self.log('Sending sensor enrollment to %s' % aid.asString())
                c.sendFrame(HcpModuleId.HCP, (rSequence().addInt8(
                    Symbols.base.OPERATION,
                    HcpOperations.SET_HCP_CONF).addBuffer(
                        Symbols.hcp.CONFIGURATION, confBuffer).addBuffer(
                            Symbols.base.SIGNATURE, confBufferSig),
                    rSequence().addInt8(
                        Symbols.base.OPERATION,
                        HcpOperations.SET_HCP_ID).addSequence(
                            Symbols.base.HCP_IDENT, aid.toJson()).addBuffer(
                                Symbols.hcp.ENROLLMENT_TOKEN,
                                enrollmentToken)))
                confBuffer = None
                confBufferSig = None
            else:
                # Known sensor: verify its enrollment token before serving.
                enrollmentToken = headers.get('hcp.ENROLLMENT_TOKEN', None)
                resp = self.enrollmentManager.request('authorize', {
                    'aid': aid.asString(),
                    'token': enrollmentToken,
                    'hash': hcpHash
                }, timeout=10)
                if not resp.isSuccess or not resp.data.get(
                        'is_authorized', False):
                    raise DisconnectException('Could not authorize %s' % aid)
            self.log('Valid client connection')
            # Eventually sync the clocks at recurring intervals
            c.sendFrame(HcpModuleId.HCP, (self.timeSyncMessage(), ))
            c.setAid(aid)
            self.currentClients[aid.sensor_id] = c
            self.zSet('clients', len(self.currentClients))
            # Announce the sensor as live to the directory/state actors.
            newStateMsg = {
                'aid': aid.asString(),
                'endpoint': self.name,
                'ext_ip': externalIp,
                'int_ip': internalIp,
                'hostname': hostName,
                'connection_id': c.connId
            }
            self.stateChanges.shoot('live', newStateMsg, timeout=30)
            self.sensorDir.broadcast('live', newStateMsg)
            del (newStateMsg)
            resp = self.tagging.request('get_tags', {'sid': aid.sensor_id},
                                        timeout=2)
            if resp.isSuccess:
                # Python-2 idiom: .values()[0].keys() -- not valid on py3.
                c.tags = resp.data.get('tags', {}).values()[0].keys()
                self.log('Retrieved tags %s for %s' % (c.tags,
                                                       aid.asString()))
            self.log('Client %s registered, beginning to receive data' %
                     aid.asString())
            lastTransferReport = time.time()
            frameIndex = 0
            # QPS-limited queue between the socket reader and the module
            # handlers; protects downstream actors from chatty sensors.
            bufferedOutput = LimitedQPSBuffer(
                self.sensorMaxQps,
                cbLog=lambda x: self.log("%s %s" % (aid.asString(), x)))
            while True:
                moduleId, messages, nRawBytes = c.recvFrame(timeout=60 * 11)
                tmpBytesReceived += nRawBytes
                # Every 10 frames, consider reporting transfer stats; the
                # report itself is rate-limited to once per 10 minutes.
                if 10 == frameIndex:
                    now = time.time()
                    if now > lastTransferReport + (60 * 10):
                        self.sensorDir.broadcast(
                            'transfered', {
                                'aid': aid.asString(),
                                'bytes_transfered': tmpBytesReceived
                            })
                        self.stateChanges.shoot(
                            'transfered', {
                                'aid': aid.asString(),
                                'bytes_transfered': tmpBytesReceived
                            })
                        tmpBytesReceived = 0
                        lastTransferReport = now
                    frameIndex = 0
                else:
                    frameIndex += 1
                handler = self.moduleHandlers.get(moduleId, None)
                if handler is None:
                    self.log('Received data for unknown module')
                else:
                    bufferedOutput.add(handler, c, messages)
        except Exception as e:
            # DisconnectException is the normal exit path; anything else
            # is logged with a traceback and re-raised.
            if type(e) is not DisconnectException:
                self.log('Exception while processing: %s' % str(e))
                self.log(traceback.format_exc())
                raise
            else:
                self.log('Disconnecting: %s' % str(e))
        finally:
            if aid is not None:
                if aid.sensor_id in self.currentClients:
                    del (self.currentClients[aid.sensor_id])
                # Flush final transfer stats and mark the sensor dead.
                self.sensorDir.broadcast(
                    'transfered', {
                        'aid': aid.asString(),
                        'bytes_transfered': tmpBytesReceived
                    })
                self.stateChanges.shoot(
                    'transfered', {
                        'aid': aid.asString(),
                        'bytes_transfered': tmpBytesReceived
                    })
                newStateMsg = {
                    'aid': aid.asString(),
                    'endpoint': self.name,
                    'connection_id': c.connId
                }
                self.stateChanges.shoot('dead', newStateMsg, timeout=30)
                self.sensorDir.broadcast('dead', newStateMsg)
                del (newStateMsg)
                self.log('Connection terminated: %s' % aid.asString())
                self.zSet('clients', len(self.currentClients))
            else:
                self.log('Connection terminated: %s:%s' % address)
            if bufferedOutput is not None:
                # Let any queued telemetry drain before tearing down.
                qSize = bufferedOutput.size()
                if 0 != qSize:
                    self.log('Waiting for queue of size %s to flush for %s' %
                             (qSize, aid.asString()))
                bufferedOutput.close()
                if 0 != qSize:
                    self.log('Queue for %s finished flushing' %
                             aid.asString())
            self.nConnected -= 1

    def handlerHcp(self, c, messages):
        """Handle HCP-module frames: sync loaded/unloaded modules."""
        for message in messages:
            if 'hcp.MODULES' in message:
                moduleUpdateResp = self.moduleManager.request(
                    'sync', {
                        'mods': message['hcp.MODULES'],
                        'aid': c.getAid(),
                        'tags': c.tags
                    }, timeout=30)
                if moduleUpdateResp.isSuccess:
                    changes = moduleUpdateResp.data['changes']
                    tasks = []
                    # Unload tasks only need the module id; load tasks
                    # carry the binary and its signature.
                    for mod in changes['unload']:
                        tasks.append(rSequence().addInt8(
                            Symbols.base.OPERATION,
                            HcpOperations.UNLOAD_MODULE).addInt8(
                                Symbols.hcp.MODULE_ID, mod))
                    for mod in changes['load']:
                        tasks.append(rSequence().addInt8(
                            Symbols.base.OPERATION,
                            HcpOperations.LOAD_MODULE).addInt8(
                                Symbols.hcp.MODULE_ID, mod[0]).addBuffer(
                                    Symbols.base.BINARY,
                                    mod[2]).addBuffer(Symbols.base.SIGNATURE,
                                                      mod[3]))
                    c.sendFrame(HcpModuleId.HCP, tasks)
                    self.log('load %d modules, unload %d modules' %
                             (len(changes['load']), len(changes['unload'])))
                else:
                    self.log("could not provide module sync: %s" %
                             moduleUpdateResp.error)

    def handlerHbs(self, c, messages):
        """Handle HBS-module frames: service profile syncs and forward
        every event to the analytics intake."""
        for i in range(len(messages)):
            self.processedCounter += 1
            if 0 == (self.processedCounter % 1000):
                self.log('EP_IN %s' % self.processedCounter)
        for message in messages:
            # We treat sync messages slightly differently since they need to be actioned
            # more directly.
            if 'notification.SYNC' in message:
                self.log("sync received from %s" % c.getAid())
                profileHash = message['notification.SYNC'].get(
                    'base.HASH', None)
                profileUpdateResp = self.hbsProfileManager.request(
                    'sync', {
                        'hprofile': profileHash,
                        'aid': c.getAid(),
                        'tags': c.tags
                    }, timeout=30)
                if profileUpdateResp.isSuccess and 'changes' in profileUpdateResp.data:
                    profile = profileUpdateResp.data['changes'].get(
                        'profile', None)
                    if profile is not None:
                        r = rpcm(isHumanReadable=False,
                                 isDebug=self.log,
                                 isDetailedDeserialize=True)
                        r.setBuffer(profile[0])
                        realProfile = r.deserialise(isList=True)
                        if realProfile is not None:
                            # profile[1] is a hex digest (py2 str.decode).
                            syncProfile = rSequence().addSequence(
                                Symbols.notification.SYNC,
                                rSequence().addBuffer(
                                    Symbols.base.HASH,
                                    profile[1].decode('hex')).addList(
                                        Symbols.hbs.CONFIGURATIONS,
                                        realProfile))
                            c.sendFrame(HcpModuleId.HBS, (syncProfile, ))
                            self.log("sync profile sent to %s" % c.getAid())
            # Transmit the message to the analytics cloud.
            routing = {
                'aid': c.getAid(),
                'hostname': c.hostName,
                'int_ip': c.int_ip,
                'ext_ip': c.ext_ip,
                'moduleid': HcpModuleId.HBS,
                'event_type': message.keys()[0],
                'event_time': message.values()[0].get('base.TIMESTAMP', None),
                'event_id': uuid.uuid4(),
                'tags': c.tags
            }
            invId = message.values()[0].get('hbs.INVESTIGATION_ID', None)
            if invId is not None:
                routing['investigation_id'] = invId
            self.analyticsIntake.shoot('analyze', ((routing, message), ),
                                       timeout=600)

    def timeSyncMessage(self):
        """Build the frame that pushes the current server time to a sensor."""
        return (rSequence().addInt8(
            Symbols.base.OPERATION,
            HcpOperations.SET_GLOBAL_TIME).addTimestamp(
                Symbols.base.TIMESTAMP, int(time.time())))

    def taskClient(self, msg):
        """RPC: forward serialized task messages to a connected sensor.

        Returns (True,) when delivered, (False,) if the sensor is not
        connected to this endpoint.
        """
        aid = AgentId(msg.data['aid'])
        messages = msg.data['messages']
        moduleId = msg.data['module_id']
        c = self.currentClients.get(aid.sensor_id, None)
        if c is not None:
            outMessages = []
            r = rpcm(isHumanReadable=False,
                     isDebug=self.log,
                     isDetailedDeserialize=True)
            for message in messages:
                r.setBuffer(message)
                outMessages.append(r.deserialise(isList=False))
            c.sendFrame(moduleId, outMessages, timeout=60 * 10)
            return (True, )
        else:
            return (False, )

    def report(self, msg):
        """RPC: report this endpoint's bind address and port."""
        return (True, {'address': self.bindAddress, 'port': self.serverPort})

    def addTag(self, msg):
        """RPC: add a tag to a connected sensor's in-memory tag list."""
        sid = AgentId(msg.data['sid']).sensor_id
        tag = msg.data['tag']
        c = self.currentClients.get(sid, None)
        if c is not None:
            if tag not in c.tags:
                c.tags.append(tag)
            return (True, )
        return (False, 'sensor not online')

    def delTag(self, msg):
        """RPC: remove a tag from a connected sensor's tag list."""
        sid = AgentId(msg.data['sid']).sensor_id
        tag = msg.data['tag']
        c = self.currentClients.get(sid, None)
        if c is not None:
            if tag in c.tags:
                try:
                    c.tags.remove(tag)
                except:
                    pass
            return (True, )
        return (False, 'sensor not online')
break parts = line.split() cmd = parts[0] if cmd == "get": key = parts[1] try: val = CACHE[key] sockfile.write("VALUE %s 0 %d\r\n" % (key, len(val))) sockfile.write(val + "\r\n") except KeyError: pass sockfile.write("END\r\n") sockfile.flush() elif cmd == "set": key = parts[1] length = int(parts[4]) val = sockfile.read(length + 2)[:length] CACHE[key] = val sockfile.write("STORED\r\n") sockfile.flush() if __name__ == "__main__": server = StreamServer(("127.0.0.1", 11211), handle_con) server.serve_forever()