def main():
    # Entry point (Python 2): read an INI config whose path is argv[1] and
    # start bidirectional SSL<->cleartext TCP forwarders, then idle forever.
    cp = ConfigParser()
    cp.read(sys.argv[1])
    certfile = cp.get('sslexport', 'pemfile')   # PEM file used for the SSL listeners
    external = cp.get('sslexport', 'external')  # address the SSL ports are exposed on
    internal = '127.0.0.1'                      # plaintext listeners stay loopback-only
    # External (SSL) listeners: accept SSL, forward as cleartext to ip:port.
    for ext_port in cp.options('sslexport.server'):
        ipport = cp.get('sslexport.server', ext_port)
        # NOTE(review): port stays a string here; Forwarder presumably converts it.
        ip, port = ipport.split(':')
        server = gevent.server.StreamServer(
            (external, int(ext_port)),
            Forwarder(ip, port, gevent.socket.socket),
            certfile=certfile)
        server.start()
        print 'ssl(%s:%s) => clear(%s:%s)' % (
            external, ext_port, ip, port)
    # Internal (non-SSL) listeners: accept cleartext, forward over SSL.
    for int_port in cp.options('sslexport.client'):
        ipport = cp.get('sslexport.client', int_port)
        ip, port = ipport.split(':')
        server = gevent.server.StreamServer(
            (internal, int(int_port)),
            Forwarder(ip, port,
                      lambda: gevent.ssl.SSLSocket(gevent.socket.socket())),
            certfile=certfile)
        server.start()
        print 'clear(%s:%s) => ssl(%s:%s)' % (
            internal, int_port, ip, port)
    # Keep the main greenlet alive; servers run in the gevent hub.
    while True:
        gevent.sleep(10)
        print '--- mark ---'
def test_send_array(self):
    """Arrays written with send_array can be decoded field-by-field by a client."""
    samples = [
        np.random.rand(7, 1),
        np.random.rand(10, 2),
        np.random.rand(10, 2, 4),
        np.random.rand(10, 1, 3),
        np.random.rand(10, 4, 3, 1),
    ]
    srv = SimpleServer(('127.0.0.1', 0))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    sleep(0.1)
    writer = Socket(srv.do_read()[0])
    for sample in samples:
        writer.send_array(sample)
        # Wire format: dtype string, byte count, rank, then each dimension.
        dtype = reader.get_string()
        nbytes = reader.get_int()
        rank = reader.get_int()
        shape = tuple(reader.get_int() for _ in range(rank))
        raw = reader.check_received_length(nbytes)
        decoded = np.frombuffer(raw, dtype=dtype).reshape(shape)
        assert np.all(decoded == sample)
    reader.close()
    srv.stop()
def start_remember_predictions_server():
    """Create and start a RememberPredictionsServer on port 30000; return it."""
    srv = RememberPredictionsServer(('', 30000))
    print("Starting server")
    srv.start()
    print("Started server")
    return srv
def wsgiserver(handler):
    """Run a WSGI server on 127.0.0.1:54323 around the managed block; stop it on exit."""
    srv = gevent.pywsgi.WSGIServer(('127.0.0.1', 54323), handler)
    srv.start()
    try:
        yield
    finally:
        srv.stop()
def test_log(self, mock_datetime):
    """One event with min/mean stats produces one pickled graphite payload."""
    now = datetime(2013, 1, 1, 2, 34, 56, 789012)
    t_now = int(time.mktime(now.timetuple()))
    mock_datetime.utcnow.side_effect = lambda: now
    port = random.randint(1024, 65535)
    captured = []

    def collect(sock, address):
        # graphite wire format: 4-byte big-endian length, then a pickle
        size, = struct.unpack('!L', sock.recv(4))
        captured.append(pickle.loads(sock.recv(size)))

    listener = gevent.server.StreamServer(('', port), collect)
    listener.start()
    self.create({'port': port})
    self.input.put(Event(metric='a.b.c', stats={'mean': 1.5, 'min': 1.0}))
    self.waitForEmpty()
    self.i.stop()
    self.assertEquals(0, self.input.qsize())
    listener.stop()
    self.assertEquals(1, len(captured))
    self.assertEquals([('a.b.c.min', (t_now, 1.0)),
                       ('a.b.c.mean', (t_now, 1.5))], captured[0])
def test_send_list(self):
    """send_list output decodes element-by-element into the original values."""
    payload = [
        np.random.rand(7, 2),
        'Hello World',
        1,
        2.654,
    ]
    srv = SimpleServer(
        ('127.0.0.1', 0),
        handle_fun=lambda sock: Socket(sock).send_list(payload))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    sleep(0.1)
    # Each element is tagged with its kind; dispatch to the matching decoder.
    decode = {'scalar': reader.get_scalar,
              'string': reader.get_string,
              'array': reader.get_array}
    decoded = []
    for _ in range(reader.get_int()):
        kind = reader.get_string()
        if kind in decode:
            decoded.append(decode[kind]())
    utils.check_vals_in_iterable(decoded, payload)
    reader.close()
    srv.stop()
def server(handler):
    """Yield with a StreamServer running on the module-level listener; stop it afterwards."""
    srv = gevent.server.StreamServer(listener, handle=handler)
    srv.start()
    try:
        yield
    finally:
        srv.stop()
def test_log(self, mock_datetime):
    """The logger emits exactly one pickled graphite batch for one stats event."""
    now = datetime(2013, 1, 1, 2, 34, 56, 789012)
    t_now = int(time.mktime(now.timetuple()))
    mock_datetime.utcnow.side_effect = lambda: now
    port = random.randint(1024, 65535)
    payloads = []

    def on_connect(sock, address):
        # 4-byte length prefix followed by a pickled list of metric tuples
        length, = struct.unpack('!L', sock.recv(4))
        payloads.append(pickle.loads(sock.recv(length)))

    graphite = gevent.server.StreamServer(('', port), on_connect)
    graphite.start()
    self.create({'port': port})
    self.input.put(Event(metric='a.b.c', stats={'mean': 1.5, 'min': 1.0}))
    self.waitForEmpty()
    self.i.stop()
    self.assertEquals(0, self.input.qsize())
    graphite.stop()
    self.assertEquals(1, len(payloads))
    expected = [
        ('a.b.c.min', (t_now, 1.0)),
        ('a.b.c.mean', (t_now, 1.5)),
    ]
    self.assertEquals(expected, payloads[0])
def start(self):
    """Start a WSGI server on an ephemeral localhost port; return the bound port."""
    srv = gevent.pywsgi.WSGIServer(
        ('localhost', 0), self.wsgi_application, log=None)
    self.server = srv
    srv.start()
    return srv.server_port
def start(self):
    """Start a WSGI server (logging disabled) on an ephemeral localhost port; return the port."""
    srv = gevent.pywsgi.WSGIServer(
        ('localhost', 0), self.wsgi_application, log=False)
    self.server = srv
    srv.start()
    return srv.server_port
def main():
    """Start a PlotServer on port 1234 with DEBUG logging and serve forever."""
    srv = PlotServer(('', 1234))
    log.setLevel("DEBUG")
    log.info("Starting server")
    srv.start()
    log.info("Started server")
    srv.serve_forever()
def test_can_reconnect(self):
    """A connected client remains socket_connected across an explicit reconnect."""
    srv = TestServer(('127.0.0.1', 0))
    srv.start()
    conn = Connection('test client', host='127.0.0.1', port=srv.server_port)
    conn.connect()
    assert conn.socket_connected
    conn.reconnect()
    assert conn.socket_connected
    srv.stop()
def start(self):
    """Start every server in self.servers, collecting failures.

    Each server's start() is attempted even if earlier ones fail; tracebacks
    are printed as they occur.

    Raises:
        RuntimeError: with a {server: exception} mapping if any start() failed.
    """
    failures = {}
    for srv in self.servers:
        try:
            srv.start()
        except Exception as exc:
            traceback.print_exc()
            failures[srv] = exc
    if failures:
        raise RuntimeError(failures)
def main(): # Configure logging for path in ('logging.yml', 'logging.default.yml'): if not os.path.isfile(path): continue with open(path, 'rt') as file: config = yaml.load(file) logging.config.dictConfig(config) # Parse arguments par = argparse.ArgumentParser( description='Export network device configuration via SNMP') par.add_argument('-V', '--version', action='version', version=__version__) par.add_argument('-c', '--community', default='public') par.add_argument('--debug-local-port', dest='local_port', default=69, type=int) par.add_argument('--debug-remote-port', dest='remote_port', default=161, type=int) par.add_argument('--debug-filename', dest='filename', default=None, type=int) par.add_argument('--debug-no-trigger', dest='no_trigger', action="store_true", default=False) par.add_argument('local_addr') par.add_argument('remote_addr') args = par.parse_args() # Determine random filename if args.filename is None: charset = (string.ascii_lowercase + string.digits)[:32] assert 256 % len(charset) == 0 # even distribution filename = "".join(charset[ord(x) % len(charset)] for x in os.urandom(16)) else: filename = args.filename # Start server server = TftpServer((args.local_addr, args.local_port)) server.start() file_obj = server.receive(filename) # Tell switch to start upload if not args.no_trigger: i = random.randint(100000, 999999) snmp = pysnmp.CommandGenerator() community = pysnmp.CommunityData(args.community) target = pysnmp.UdpTransportTarget((args.remote_addr, args.remote_port)) errIndication, errStatus, errIndex, varBinds = snmp.setCmd(community, target, ("1.3.6.1.4.1.9.9.96.1.1.1.1.2.%i" % i, pysnmp_types.Integer(1)), ("1.3.6.1.4.1.9.9.96.1.1.1.1.3.%i" % i, pysnmp_types.Integer(4)), ("1.3.6.1.4.1.9.9.96.1.1.1.1.4.%i" % i, pysnmp_types.Integer(1)), ("1.3.6.1.4.1.9.9.96.1.1.1.1.5.%i" % i, pysnmp_types.IpAddress(args.local_addr)), ("1.3.6.1.4.1.9.9.96.1.1.1.1.6.%i" % i, pysnmp_types.OctetString(filename))) errIndication, errStatus, errIndex, varBinds = 
snmp.setCmd(community, target, ("1.3.6.1.4.1.9.9.96.1.1.1.1.14.%i" % i, pysnmp_types.Integer(1))) else: print("filename: %s" % filename) # Wait for upload to finish print file_obj.read()
def test_check_sended(self):
    """check_sended pushes the raw bytes through the socket unchanged."""
    message = 'this is a message of a given length'
    srv = SimpleServer(
        ('127.0.0.1', 0),
        handle_fun=lambda sock: Socket(sock).check_sended(message.encode()))
    srv.start()
    conn = socket.create_connection(('127.0.0.1', srv.server_port))
    assert conn.recv(4096) == b'this is a message of a given length'
    conn.close()
    srv.stop()
def test_get_int(self):
    """get_int decodes an integer encoded by Socket.int_to_bytes."""
    expected = 15489
    srv = SimpleServer(
        ('127.0.0.1', 0),
        handle_fun=lambda sock: sock.sendall(Socket.int_to_bytes(expected)))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    assert reader.get_int() == expected
    reader.close()
    srv.stop()
def test_get_string(self):
    """get_string round-trips a string written with send_string."""
    message = 'this is a message'
    srv = SimpleServer(
        ('127.0.0.1', 0),
        handle_fun=lambda sock: Socket(sock).send_string(message))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    assert reader.get_string() == message
    reader.close()
    srv.stop()
def server(handler, backlog=1):
    """Yield with an SSL StreamServer running on the module listener; stop it on exit."""
    srv = gevent.server.StreamServer(
        listener, backlog=backlog, handle=handler,
        keyfile=KEY, certfile=CERT)
    srv.start()
    try:
        yield
    finally:
        srv.stop()
def test_can_make_connection(self):
    """Connecting delivers the server greeting and leaves the client healthy."""
    srv = TestServer(('127.0.0.1', 0))
    srv.start()
    conn = Connection('test client', host='127.0.0.1', port=srv.server_port)
    conn.connect()
    assert conn.socket_connected
    assert conn.input_queue.get() == 'testing the connection'
    assert not conn.needs_reconnect
    srv.stop()
def server(handler, backlog=1):
    """Yield (host, port) of a running SSL StreamServer on an ephemeral localhost port."""
    srv = gevent.server.StreamServer(
        ("localhost", 0), backlog=backlog, handle=handler,
        keyfile=KEY, certfile=CERT)
    srv.start()
    try:
        yield (srv.server_host, srv.server_port)
    finally:
        srv.stop()
def test_can_send_data(self):
    """Data put on the output queue arrives at the server CRLF-terminated."""
    srv = TestServer(('127.0.0.1', 0))
    srv.start()
    conn = Connection('test client', host='127.0.0.1', port=srv.server_port)
    conn.connect()
    assert conn.socket_connected
    conn.output_queue.put('testing sending')
    time.sleep(2)  # give the writer greenlet time to flush
    self.assertEqual(srv.input_data, 'testing sending\r\n')
    srv.stop()
def test_does_connect():
    """connect_and_retry reaches a live server; the line protocol yields one line."""
    class OneShotServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall("hello and goodbye!")
            sock.shutdown(0)
    srv = OneShotServer(('127.0.0.1', 0))
    srv.start()
    conn = util.connect_and_retry(('127.0.0.1', srv.server_port))
    received = list(util.line_protocol(conn))
    assert len(received) == 1, "Didn't receive the line"
    srv.stop()
def server(handler, backlog=1):
    """Context helper: run an SSL StreamServer on localhost:0, yield its address."""
    srv = gevent.server.StreamServer(
        ("localhost", 0),
        backlog=backlog,
        handle=handler,
        keyfile=KEY,
        certfile=CERT)
    srv.start()
    try:
        yield (srv.server_host, srv.server_port)
    finally:
        srv.stop()
def test_check_received_length(self):
    """check_received_length returns exactly the requested number of bytes."""
    message = 'this is a message'
    srv = SimpleServer(
        ('127.0.0.1', 0),
        handle_fun=lambda sock: Socket(sock).check_sended(message.encode()))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    expected = message.encode()
    assert reader.check_received_length(len(expected)) == expected
    reader.close()
    srv.stop()
def server(handler, backlog=1):
    """Run an SSL StreamServer on the module-level listener around the managed block."""
    srv = gevent.server.StreamServer(
        listener,
        backlog=backlog,
        handle=handler,
        keyfile=KEY,
        certfile=CERT)
    srv.start()
    try:
        yield
    finally:
        srv.stop()
def test_does_connect():
    """A retried connect against a one-shot server yields exactly one line."""
    class GoodbyeServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall("hello and goodbye!")
            sock.shutdown(0)
    srv = GoodbyeServer(("127.0.0.1", 0))
    srv.start()
    conn = util.connect_and_retry(("127.0.0.1", srv.server_port))
    received = [line for line in util.line_protocol(conn)]
    assert len(received) == 1, "Didn't receive the line"
    srv.stop()
def test_send_string(self):
    """send_string writes a 4-byte big-endian length prefix followed by the text."""
    message = 'this is a message'
    srv = SimpleServer(
        ('127.0.0.1', 0),
        handle_fun=lambda sock: Socket(sock).send_string(message))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    prefix = int.from_bytes(reader.check_received_length(4), 'big')
    assert prefix == len(message)
    assert reader.check_received_length(prefix) == message.encode()
    reader.close()
    srv.stop()
def test_one_liner():
    """A single unterminated line is still yielded once the peer shuts down."""
    expected = 'hello and goodbye!'
    class OneLineServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall(expected)
            sock.shutdown(0)
    srv = OneLineServer(('127.0.0.1', 0))
    srv.start()
    conn = gevent.socket.create_connection(('127.0.0.1', srv.server_port))
    received = list(util.line_protocol(conn))
    assert len(received) == 1, "Got too many (or not enough) lines"
    assert received[0] == expected, "Didn't get the line expected"
    srv.stop()
def test_get_scalar(self):
    """get_scalar round-trips both ints and floats sent with send_scalar."""
    values = [15489, 2.4589]
    srv = SimpleServer(('127.0.0.1', 0))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    sleep(0.1)
    writer = Socket(srv.do_read()[0])
    for value in values:
        writer.send_scalar(value)
        assert reader.get_scalar() == value
    reader.close()
    srv.stop()
def test_multi_lines_rn():
    """CRLF-separated lines are each yielded by the line protocol."""
    text = 'hello and goodbye!'
    count = 5
    class CrlfServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall('\r\n'.join([text for n in xrange(count)]))
            sock.shutdown(0)
    srv = CrlfServer(('127.0.0.1', 0))
    srv.start()
    conn = gevent.socket.create_connection(('127.0.0.1', srv.server_port))
    received = [line for line in util.line_protocol(conn)]
    assert len(received) == count, "Got too many (or not enough) lines"
    assert received.pop() == text, "Didn't get the line expected"
    srv.stop()
def test_strip_on_lines():
    """By default the line protocol strips the trailing newline off each line."""
    raw = 'hello and goodbye!\n'
    count = 5
    class RepeatServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall(''.join([raw for n in xrange(count)]))
            sock.shutdown(0)
    srv = RepeatServer(('127.0.0.1', 0))
    srv.start()
    conn = gevent.socket.create_connection(('127.0.0.1', srv.server_port))
    received = [line for line in util.line_protocol(conn)]
    assert len(received) == count, "Got too many (or not enough) lines"
    assert received.pop() != raw, "Line includes newlines (or something)"
    assert received.pop() == raw.strip(), "Line doesn't match when stripped"
    srv.stop()
def test_multi_lines():
    """Newline-separated lines are each yielded by the line protocol."""
    text = 'hello and goodbye!'
    count = 5
    class NewlineServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall('\n'.join([text for n in xrange(count)]))
            sock.shutdown(0)
    srv = NewlineServer(('127.0.0.1', 0))
    srv.start()
    conn = gevent.socket.create_connection(('127.0.0.1', srv.server_port))
    received = [line for line in util.line_protocol(conn)]
    assert len(received) == count, "Got too many (or not enough) lines"
    assert received.pop() == text, "Didn't get the line expected"
    srv.stop()
def test_no_strip_on_lines():
    """With strip=False the trailing newline (and the final blank line) survive."""
    raw = 'hello and goodbye!\n'
    count = 5
    class RepeatServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall(''.join([raw for n in xrange(count)]))
            sock.sendall('\n')
            sock.shutdown(0)
    srv = RepeatServer(('127.0.0.1', 0))
    srv.start()
    conn = gevent.socket.create_connection(('127.0.0.1', srv.server_port))
    received = [line for line in util.line_protocol(conn, strip=False)]
    assert len(received) == count + 1, "Got too many (or not enough) lines"
    assert received.pop() == '\n', "Didn't get empty line"
    assert received.pop() == raw, "Line doesn't match line with newline"
    srv.stop()
def main(number=1, bind=DEFAULT_BIND, port=DEFAULT_PORT, **kwargs):
    """Start `number` PSSimulator servers on consecutive ports and idle until Ctrl-C.

    Args:
        number: how many simulator instances to start.
        bind: address each simulator binds to.
        port: first port; instance i listens on port + i.
        **kwargs: accepted for CLI-driver compatibility; unused here.
    """
    servers = []
    logging.info('starting simulator...')
    for i in range(number):
        address = bind, port + i
        server = PSSimulator(address)
        server.start()
        servers.append(server)
        # typo fix: "listenning" -> "listening"
        server.log.info('simulator listening on %r!', address)
    try:
        while True:
            # gevent.joinall(servers)
            gevent.sleep(1)  # keep the main greenlet alive
    except KeyboardInterrupt:
        logging.info('Ctrl-C pressed. Bailing out!')
        for server in servers:
            server.stop()
def test_send_another_list(self):
    """send_list raises TypeError for lists containing unsupported element types."""
    bad_payload = [
        'another list that should raise an exception because there is a boolean that is not a valid type',
        [
            'gg',
        ],
    ]
    srv = SimpleServer(('127.0.0.1', 0))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    sleep(0.1)
    writer = Socket(srv.do_read()[0])
    with pytest.raises(TypeError):
        assert writer.send_list(bad_payload)
    srv.stop()
def test_no_strip_on_lines():
    """strip=False preserves newlines; an extra '\\n' shows up as an empty line."""
    raw = 'hello and goodbye!\n'
    count = 5
    class TrailingNewlineServer(gevent.server.StreamServer):
        def handle(self, sock, address):
            sock.sendall(''.join([raw for n in xrange(count)]))
            sock.sendall('\n')
            sock.shutdown(0)
    srv = TrailingNewlineServer(('127.0.0.1', 0))
    srv.start()
    conn = gevent.socket.create_connection(('127.0.0.1', srv.server_port))
    received = [line for line in util.line_protocol(conn, strip=False)]
    assert len(received) == count + 1, "Got too many (or not enough) lines"
    assert received.pop() == '\n', "Didn't get empty line"
    assert received.pop() == raw, "Line doesn't match line with newline"
    srv.stop()
def test_send_scalar(self):
    """send_scalar emits dtype, byte length and raw bytes decodable via numpy."""
    values = [15489, 2.4589]
    srv = SimpleServer(('127.0.0.1', 0))
    srv.start()
    reader = Socket(
        socket.create_connection(('127.0.0.1', srv.server_port)))
    sleep(0.1)
    writer = Socket(srv.do_read()[0])
    for value in values:
        writer.send_scalar(value)
        dtype = reader.get_string()
        nbytes = reader.get_int()
        raw = reader.check_received_length(nbytes)
        assert np.frombuffer(raw, dtype=dtype)[0] == value
    reader.close()
    srv.stop()
def test_get_array(self):
    """get_array round-trips arrays of various ranks sent with send_array."""
    arrays = [
        np.random.rand(7, 1),
        np.random.rand(10, 2),
        np.random.rand(10, 2, 4),
        np.random.rand(10, 1, 3),
        np.random.rand(10, 4, 3, 1),
    ]
    server = SimpleServer(('127.0.0.1', 0), )
    server.start()
    client = Socket(
        socket.create_connection(('127.0.0.1', server.server_port)))
    # FIX: give the server a moment to accept the connection before do_read(),
    # matching the other tests in this suite (avoids an intermittent race).
    sleep(0.1)
    server_socket = Socket(server.do_read()[0])
    for array in arrays:
        server_socket.send_array(array)
        data = client.get_array()
        assert np.all(data == array)
    client.close()
    server.stop()
def run(self, test):
    """
    This is the entry point for the run of the tests.
    This runner starts the test queue server and spawns the test runner
    clients.

    (Part of TextTestRunner API)

    Execute the test (which may be a test suite). If the test is a suite,
    distribute it out among as many processes as have been configured, at as
    fine a level as is possible given the context fixtures defined in the
    suite or any sub-suites.
    """
    log.debug("%s.run(%s) (%s)", self, test, os.getpid())
    tasks_queue = []
    remaining_tasks = []  # contains a list of task addresses
    # completed_tasks = []  # list of tuples like [task_address, batch_result]
    to_teardown = []
    # thrownError = None
    # API: plugins may wrap the test and the output stream.
    test = self.config.plugins.prepareTest(test) or test
    self.stream = self.config.plugins.setOutputStream(self.stream) or self.stream
    result = self._makeResult()
    start = time.time()
    # populates the queues
    self.collect_tasks(test, tasks_queue, remaining_tasks, to_teardown, result)
    queue_manager = TestsQueueManager(
        tasks_queue,
        loader_class=self.loaderClass,
        result_class=result.__class__,
        config=self.config
    )
    # Serve the task queue over WSGI; workers pull tasks from it.
    server = WSGIServer(queue_manager)
    server_port = server.start()
    results_processor = queue_manager.start_test_results_processor(
        remaining_tasks, result, self.stream, self.config.stopOnError)
    number_of_workers = self.config.options.gevented_processes
    run_clients(
        number_of_workers,
        server_port
    )  # <-- blocks until all are done consuming the queue
    results_processor.join()
    # TODO: not all tests / set ups could have ran; see if we can prune the
    # tearDown collection as a result.
    # NOTE(review): this view of the method appears truncated — error
    # handling and summary reporting likely follow elsewhere.
    for case in to_teardown:
        try:
            case.tearDown()
        except (KeyboardInterrupt, SystemExit):
            raise
def test_unavailable(self):
    """An event queued before the graphite listener starts is still delivered."""
    port = random.randint(1024, 65535)
    captured = []

    def collect(sock, address):
        # graphite wire format: 4-byte length prefix, then a pickle
        size, = struct.unpack('!L', sock.recv(4))
        captured.append(pickle.loads(sock.recv(size)))

    listener = gevent.server.StreamServer(('', port), collect)
    self.create({'port': port})
    self.input.put(Event(metric='a.b.c', stats={'mean': 1.5, 'min': 1.0}))
    listener.start()  # deliberately started only after the event was queued
    self.waitForEmpty()
    self.assertEquals(0, self.input.qsize())
    self.i.stop()
    listener.stop()
    self.assertEquals(1, len(captured))
def start(self):
    """Create a StreamServer for self.address with self.handle and begin accepting."""
    # NOTE(review): the server object is not retained on self here — stopping
    # it later is not possible from this reference; confirm that's intended.
    acceptor = gevent.server.StreamServer(self.address, self.handle)
    acceptor.start()
def lbmain(args=None, run=True):
    """%prog [options] zookeeper_connection path

    Run a resume-based load balancer on addr.
    """
    # args=None -> real CLI; a string -> test mode (parsed, but don't serve).
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, str):
        args = args.split()
        run = False
    import optparse
    parser = optparse.OptionParser(lbmain.__doc__)
    parser.add_option(
        '-a', '--address', default=':0',
        help="Address to listed on for web requests"
        )
    parser.add_option(
        '-b', '--backlog', type='int',
        help="Server backlog setting.")
    parser.add_option(
        '-d', '--backdoor', action='store_true',
        help="Run a backdoor server. Use with caution!")
    parser.add_option(
        '-e', '--disconnect-message',
        help="Path to error page to use when a request is lost due to "
        "worker disconnection"
        )
    parser.add_option(
        '-L', '--logger-configuration',
        help=
        "Read logger configuration from the given configuration file path.\n"
        "\n"
        "The configuration file must be in ZConfig logger configuration syntax."
        "\n"
        "Alternatively, you can give a Python logger level name or number."
        )
    parser.add_option('-l', '--access-logger', help='Access-log logger name.')
    parser.add_option(
        '-m', '--max-connections', type='int',
        help="Maximum number of simultanious accepted connections.")
    parser.add_option(
        '-r', '--request-classifier',
        default='zc.resumelb.lb:host_classifier',
        help="Request classification function (module:expr)"
        )
    parser.add_option(
        '-s', '--status-server',
        help=("Run a status server for getting pool information. "
              "The argument is a unix-domain socket path to listen on."))
    parser.add_option(
        '-t', '--socket-timeout', type='float', default=99.,
        help=('HTTP socket timeout.'))
    parser.add_option(
        '-v', '--single-version', action='store_true',
        help=('Only use a single worker version.'))
    try:
        options, args = parser.parse_args(args)
        if len(args) != 2:
            print 'Error: must supply a zookeeper connection string and path.'
            parser.parse_args(['-h'])  # prints usage and raises SystemExit
        zookeeper, path = args
    except SystemExit:
        # In test mode we swallow the exit instead of killing the process.
        if run:
            raise
        else:
            return
    # Logger configuration: an int level, a level name, or a ZConfig file.
    if options.logger_configuration:
        logger_config = options.logger_configuration
        if re.match(r'\d+$', logger_config):
            logging.basicConfig(level=int(logger_config))
        elif logger_config in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'):
            logging.basicConfig(level=getattr(logging, logger_config))
        else:
            import ZConfig
            with open(logger_config) as f:
                ZConfig.configureLoggers(f.read())
    zk = zc.zk.ZooKeeper(zookeeper)
    addrs = zk.children(path+'/workers/providers')
    # Resolve the request classifier "module:expr" option.
    rcmod, rcexpr = options.request_classifier.split(':')
    __import__(rcmod)
    rcmod = sys.modules[rcmod]
    request_classifier = eval(rcexpr, rcmod.__dict__)
    disconnect_message = options.disconnect_message
    if disconnect_message:
        with open(disconnect_message) as f:
            disconnect_message = f.read()
    else:
        disconnect_message = zc.resumelb.lb.default_disconnect_message
    from zc.resumelb.lb import LB
    lb = LB(map(zc.parse_addr.parse_addr, ()),
            request_classifier, disconnect_message,
            single_version=options.single_version)
    to_send = [[]]
    # Set up notification of address changes.
    awatcher = gevent.get_hub().loop.async()
    @awatcher.start
    def _():
        lb.set_worker_addrs(to_send[0])
    if options.single_version:
        @addrs
        def get_addrs(a):
            # Map worker address -> advertised version, then wake the watcher.
            to_send[0] = dict(
                (zc.parse_addr.parse_addr(addr),
                 zk.get_properties(
                     path+'/workers/providers/'+addr).get('version')
                 )
                for addr in addrs)
            awatcher.send()
    else:
        @addrs
        def get_addrs(a):
            to_send[0] = map(zc.parse_addr.parse_addr, addrs)
            awatcher.send()
    # Set up notification of settings changes.
    settings = zk.properties(path)
    swatcher = gevent.get_hub().loop.async()
    swatcher.start(lambda : lb.update_settings(settings))
    settings(lambda a: swatcher.send())
    lb.zk = zk
    lb.__zk = addrs, settings
    # Now, start a wsgi server
    addr = zc.parse_addr.parse_addr(options.address)
    if options.max_connections:
        spawn = gevent.pool.Pool(options.max_connections)
    else:
        spawn = 'default'
    if options.access_logger:
        accesslog = AccessLog(options.access_logger)
    else:
        accesslog = None
    server = WSGIServer(
        addr, lb.handle_wsgi, backlog=options.backlog,
        spawn=spawn, log=accesslog,
        socket_timeout=options.socket_timeout)
    server.start()
    registration_data = {}
    if options.backdoor:
        from gevent import backdoor
        bd = backdoor.BackdoorServer(('127.0.0.1', 0), locals())
        bd.start()
        registration_data['backdoor'] = '127.0.0.1:%s' % bd.server_port
    status_server = None
    if options.status_server:
        def status(socket, addr):
            # Dump pool/worker backlog stats as one JSON line, then close.
            pool = lb.pool
            writer = socket.makefile('w')
            writer.write(json.dumps(
                dict(
                    backlog = pool.backlog,
                    mean_backlog = pool.mbacklog,
                    workers = [
                        (worker.__name__,
                         worker.backlog,
                         worker.mbacklog,
                         (int(worker.oldest_time)
                          if worker.oldest_time else None),
                         )
                        for worker in sorted(
                            pool.workers, key=lambda w: w.__name__)
                        ]
                    ))+'\n')
            writer.close()
            socket.close()
        status_server_address = options.status_server
        if os.path.exists(status_server_address):
            os.remove(status_server_address)
        sock = gevent.socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(status_server_address)
        sock.listen(5)
        status_server = gevent.server.StreamServer(sock, status)
        status_server.start()
    zk.register_server(
        path+'/providers', (addr[0], server.server_port),
        **registration_data)
    def shutdown():
        # Deregister first, then tear everything down.
        zk.close()
        server.close()
        if status_server is not None:
            status_server.close()
        lb.shutdown()
    gevent.signal(signal.SIGTERM, shutdown)
    if run:
        try:
            server.serve_forever()
        finally:
            logging.getLogger(__name__+'.lbmain').info('exiting')
            zk.close()
    else:
        gevent.sleep(.01)
        return lb, server
def run(self, test):
    """
    This is the entry point for the run of the tests.
    This runner starts the test queue server and spawns the test runner
    clients.

    (Part of TextTestRunner API)

    Execute the test (which may be a test suite). If the test is a suite,
    distribute it out among as many processes as have been configured, at as
    fine a level as is possible given the context fixtures defined in the
    suite or any sub-suites.
    """
    log.debug("%s.run(%s) (%s)", self, test, os.getpid())
    tasks_queue = []  # contains a list of task addresses
    remaining_tasks = []  # list of tuples like [task_address, batch_result]
    # completed_tasks = []
    to_teardown = []
    # thrownError = None
    # API: plugins may wrap the test and the output stream.
    test = self.config.plugins.prepareTest(test) or test
    self.stream = self.config.plugins.setOutputStream(self.stream) \
        or self.stream
    result = self._makeResult()
    start = time.time()
    # populates the queues
    self.collect_tasks(test, tasks_queue, remaining_tasks, to_teardown,
                       result)
    # Optionally sort tasks longest-first using timings from a previous run.
    if self.config.options.gevented_timing_file:
        try:
            task_times = pickle.load(
                open(self.config.options.gevented_timing_file, 'r'))
        except:
            log.debug('No task times to read for sorting.')
            task_times = {}
        if task_times:
            log.debug('Unsorted tasks: {}'.format(tasks_queue))
            tasks_queue.sort(
                key=lambda t: task_times.get(get_task_key(t), 0),
                reverse=True)
            log.debug('Sorted tasks: {}'.format(tasks_queue))
    else:
        task_times = {}
    queue_manager = TestsQueueManager(
        tasks_queue,
        loader_class=self.loaderClass,
        result_class=result.__class__,
        config=self.config,
        task_times=task_times
    )
    # Serve the task queue over WSGI; workers pull tasks from it.
    server = WSGIServer(queue_manager)
    server_port = server.start()
    results_processor = queue_manager.start_test_results_processor(
        remaining_tasks, result, self.stream, self.config.stopOnError)
    number_of_workers = self.config.options.gevented_processes
    run_clients(
        number_of_workers,
        server_port
    )  # <-- blocks until all are done consuming the queue
    queue_manager.run_clients_event.set()
    results_processor.join()
    # TODO: not all tests / set ups could have ran; see if we can prune
    # the tearDown collection as a result.
    for case in to_teardown:
        try:
            case.tearDown()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            result.addError(case, sys.exc_info())
    stop = time.time()
    # Persist per-task timings so the next run can sort longest-first.
    if self.config.options.gevented_timing_file:
        try:
            pickle.dump(
                queue_manager.task_times,
                open(self.config.options.gevented_timing_file, 'w'))
        except:
            log.exception('Error saving task times to {}'.format(
                self.config.options.gevented_timing_file))
    # first write since can freeze on shutting down processes
    result.printErrors()
    result.printSummary(start, stop)
    self.config.plugins.finalize(result)
    # except (KeyboardInterrupt, SystemExit):
    #     if thrownError:
    #         raise thrownError
    #     else:
    #         raise
    return result
def main(ui_hostname, ui_port, base_name, params_filename, plot_sensors,
         use_ui_server, adapt_model, save_data, n_updates_per_break,
         batch_size, learning_rate, n_min_trials, trial_start_offset,
         break_start_offset, break_stop_offset, pred_gap,
         incoming_port, load_old_data, use_new_adam_params,
         input_time_length, train_on_breaks, min_break_samples,
         min_trial_samples):
    # Entry point: load model (and optionally trainer) parameters, build the
    # online prediction pipeline, and serve predictions on incoming_port.
    setup_logging()
    assert np.little_endian, "Should be in little endian"
    train_params = None  # for trainer, e.g. adam params
    if params_filename is not None:
        if params_filename == 'newest':
            # sort will already sort temporally with our time string format
            all_params_files = sorted(glob(base_name + ".*.model_params.npy"))
            assert len(all_params_files) > 0, (
                "Expect atleast one params file "
                "if 'newest' given as argument")
            params_filename = all_params_files[-1]
        log.info("Loading model params from {:s}".format(params_filename))
        params = np.load(params_filename)
        train_params_filename = params_filename.replace(
            'model_params.npy', 'trainer_params.npy')
        if os.path.isfile(train_params_filename):
            if use_new_adam_params:
                log.info("Loading trainer params from {:s}".format(
                    train_params_filename))
                train_params = np.load(train_params_filename)
        else:
            log.warn("No train/adam params found, starting optimization params "
                     "from scratch (model params will be loaded anyways).")
    else:
        # No explicit params file: fall back to <base_name>.npy
        params = np.load(base_name + '.npy')
    exp = create_experiment(base_name + '.yaml')
    # Possibly change input time length, for exmaple
    # if input time length very long during training and should be
    # shorter for online
    if input_time_length is not None:
        log.info("Change input time length to {:d}".format(input_time_length))
        set_input_window_length(exp.final_layer, input_time_length)
        # probably unnecessary, just for safety
        exp.iterator.input_time_length = input_time_length
    # Have to set for both exp final layer and actually used model
    # as exp final layer might be used for adaptation
    # maybe check this all for correctness?
    cnt_model = exp.final_layer
    set_param_values_backwards_compatible(cnt_model, params)
    prediction_model = transform_to_normal_net(cnt_model)
    set_param_values_backwards_compatible(prediction_model, params)
    data_processor = StandardizeProcessor(factor_new=1e-3)
    online_model = OnlineModel(prediction_model)
    if adapt_model:
        online_trainer = BatchWiseCntTrainer(
            exp, n_updates_per_break, batch_size, learning_rate,
            n_min_trials, trial_start_offset,
            break_start_offset=break_start_offset,
            break_stop_offset=break_stop_offset,
            train_param_values=train_params,
            add_breaks=train_on_breaks,
            min_break_samples=min_break_samples,
            min_trial_samples=min_trial_samples)
    else:
        log.info("Not adapting model...")
        online_trainer = NoTrainer()
    coordinator = OnlineCoordinator(data_processor, online_model,
                                    online_trainer, pred_gap=pred_gap)
    hostname = ''  # bind on all interfaces
    server = PredictionServer(
        (hostname, incoming_port), coordinator=coordinator,
        ui_hostname=ui_hostname, ui_port=ui_port,
        plot_sensors=plot_sensors, use_ui_server=use_ui_server,
        save_data=save_data, model_base_name=base_name,
        adapt_model=adapt_model)
    # Compilation takes some time so initialize trainer already
    # before waiting in connection in server
    online_trainer.initialize()
    if adapt_model and load_old_data:
        online_trainer.add_data_from_today(data_processor)
    log.info("Starting server on port {:d}".format(incoming_port))
    server.start()
    log.info("Started server")
    server.serve_forever()
# Debug script (Python 2): listen on port 30000 and print pairs of lines
# (sample index, predictions) received from a connected client.
import gevent.server
import signal


def handle(socket, address):
    # One greenlet per connection: read two lines at a time and echo them.
    print ("new connection")
    # using a makefile because we want to use readline()
    socket_file = socket.makefile()
    while True:
        i_sample = socket_file.readline()
        preds = socket_file.readline()
        print i_sample
        print preds


# SIGQUIT kills all greenlets so Ctrl-\ exits cleanly.
gevent.signal(signal.SIGQUIT, gevent.kill)
hostname = ''  # bind on all interfaces
port = 30000
server = gevent.server.StreamServer((hostname, port), handle)
print("Starting server on port {:d}".format(port))
server.start()
print("Started server")
server.serve_forever()
def __init__(self):
    """Bind a loopback StreamServer on an ephemeral port and record its address."""
    srv = gevent.server.StreamServer(('127.0.0.1', 0), self.handle)
    self.server = srv
    srv.start()
    self.addr = '127.0.0.1', srv.server_port
def main(ui_hostname, ui_port, base_name, params_filename, plot_sensors,
         use_ui_server, adapt_model, save_data, n_updates_per_break,
         batch_size, learning_rate, n_min_trials, trial_start_offset,
         break_start_offset, break_stop_offset, pred_freq,
         incoming_port, load_old_data, use_new_adam_params,
         input_time_length):
    # Entry point (older variant): load lasagne model/trainer parameters,
    # build the online pipeline, and serve predictions on incoming_port.
    setup_logging()
    assert np.little_endian, "Should be in little endian"
    train_params = None  # for trainer, e.g. adam params
    if params_filename is not None:
        if params_filename == 'newest':
            # sort will already sort temporally with our time string format
            all_params_files = sorted(glob(base_name + ".*.model_params.npy"))
            assert len(all_params_files) > 0, (
                "Expect atleast one params file "
                "if 'newest' given as argument")
            params_filename = all_params_files[-1]
        log.info("Loading model params from {:s}".format(params_filename))
        params = np.load(params_filename)
        train_params_filename = params_filename.replace(
            'model_params.npy', 'trainer_params.npy')
        if os.path.isfile(train_params_filename):
            if use_new_adam_params:
                log.info("Loading trainer params from {:s}".format(
                    train_params_filename))
                train_params = np.load(train_params_filename)
        else:
            log.warn("No train/adam params found, starting optimization params "
                     "from scratch (model params will be loaded anyways).")
    else:
        # No explicit params file: fall back to <base_name>.npy
        params = np.load(base_name + '.npy')
    exp = create_experiment(base_name + '.yaml')
    # Possibly change input time length, for exmaple
    # if input time length very long during training and should be
    # shorter for online
    if input_time_length is not None:
        log.info("Change input time length to {:d}".format(input_time_length))
        set_input_window_length(exp.final_layer, input_time_length)
        # probably unnecessary, just for safety
        exp.iterator.input_time_length = input_time_length
    # Have to set for both exp final layer and actually used model
    # as exp final layer might be used for adaptation
    # maybe check this all for correctness?
    cnt_model = exp.final_layer
    lasagne.layers.set_all_param_values(cnt_model, params)
    prediction_model = transform_to_normal_net(cnt_model)
    lasagne.layers.set_all_param_values(prediction_model, params)
    data_processor = StandardizeProcessor(factor_new=1e-3)
    online_model = OnlineModel(prediction_model)
    if adapt_model:
        online_trainer = BatchWiseCntTrainer(
            exp, n_updates_per_break, batch_size, learning_rate,
            n_min_trials, trial_start_offset,
            break_start_offset=break_start_offset,
            break_stop_offset=break_stop_offset,
            train_param_values=train_params)
    else:
        log.info("Not adapting model...")
        online_trainer = NoTrainer()
    coordinator = OnlineCoordinator(data_processor, online_model,
                                    online_trainer, pred_freq=pred_freq)
    hostname = ''  # bind on all interfaces
    server = PredictionServer(
        (hostname, incoming_port), coordinator=coordinator,
        ui_hostname=ui_hostname, ui_port=ui_port,
        plot_sensors=plot_sensors, use_ui_server=use_ui_server,
        save_data=save_data, model_base_name=base_name,
        adapt_model=adapt_model)
    # Compilation takes some time so initialize trainer already
    # before waiting in connection in server
    online_trainer.initialize()
    if adapt_model and load_old_data:
        online_trainer.add_data_from_today(data_processor)
    log.info("Starting server on port {:d}".format(incoming_port))
    server.start()
    log.info("Started server")
    server.serve_forever()