def __init__(self, args=None):
    '''Args must be an object with the following attributes:
       foreground, logfile, mailbox, nClients, silent, socketpath,
       verbose.  Suitable defaults will be supplied.'''

    # Pass command line args to ProtocolIVSHMSG, then open logging.
    if args is None:
        args = argparse.Namespace()
    for arg, default in self._required_arg_defaults.items():
        setattr(args, arg, getattr(args, arg, default))

    # Mailbox may be sized above the requested number of clients to
    # satisfy QEMU IVSHMEM restrictions.
    args.server_id = args.nClients + 1
    args.nEvents = args.nClients + 2

    # It's a singleton so there's no reason to keep the instance, but
    # that's the way I wrote the Klein API server, so...
    mb = MB(args=args)
    MailBoxReSTAPI(mb)
    shutdown_http_logging()

    if args.foreground:
        if args.verbose > 1:
            TPlog.startLogging(sys.stdout, setStdout=False)
        else:
            TPlog.startLogging(open('/dev/null', 'a'), setStdout=False)
    else:
        PRINT('Logging to %s' % args.logfile)
        TPlog.startLogging(
            DailyLogFile.fromFullPath(args.logfile),
            setStdout=True)

    # "Pass-through" explicit print() for debug.
    args.logmsg = TPlog.msg
    args.logerr = TPlog.err

    # As of Twisted 18, "mode=" is deprecated; the socket should just
    # inherit the sticky bit from the parent directory.  wantPID
    # creates <path>.lock as a symlink to the PID.
    E = UNIXServerEndpoint(
        TIreactor,
        args.socketpath,
        mode=0o666,     # Deprecated at Twisted 18
        wantPID=True)
    E.listen(self)
    args.logmsg(
        '%s server @%d ready for %d clients on %s' %
        (args.title, args.server_id, args.nClients, args.socketpath))

    # https://stackoverflow.com/questions/1411281/twisted-listen-to-multiple-ports-for-multiple-processes-with-one-reactor

    # Voodoo kick to a) set up one-time SI and b) set up the commander.
    # The docs mislead: you have to explicitly pass something to get
    # persistent state across protocol/transport invocations.  As there
    # is only one server object per process instantiation, that's not
    # strictly necessary here.
    protobj = ProtocolIVSHMSGServer(self, args)     # With "args"
    Commander(protobj)
def __init__(self, name, socket_file, fields):
    """
    Construct the module with its *name* and its *fields*.  Also try to
    create a UNIX server endpoint on the *socket_file* path.
    """
    threading.Thread.__init__(self)
    self.name = name
    self.fields = fields
    self.ready_file = path.ready_file(self.name)
    self.exitcode = self.SUCCESS_EXIT
    if os.path.exists(socket_file):
        os.remove(socket_file)
    endpoint = ServerEndpoint(reactor, socket_file)
    endpoint.listen(connection.Factory(self))
def __call__(self, *args, **kwargs):
    obj = super(BaseMeta, self).__call__(*args, **kwargs)
    cls = type(obj)

    # Handle module name
    if not hasattr(obj, 'module_name'):
        if not hasattr(cls, 'module_name'):
            setattr(obj, 'module_name', cls.__name__)
        else:
            setattr(obj, 'module_name', cls.module_name)
    if obj.module_name in type(self).ls_name:
        raise NameError('A module with the same name already exists')
    type(self).ls_name.add(obj.module_name)

    # Handle module socket (server side)
    setattr(obj, 'module_socket', path.socket_file(obj.module_name))
    try:
        os.remove(obj.module_socket)
    except OSError:
        pass
    endpoint = ServerEndpoint(reactor, obj.module_socket)
    endpoint.listen(connection.Factory(obj))

    # Handle module fields
    from khome.fields import Base as Field
    ls_fields = []
    for f_name in cls.__dict__.keys():
        f_cls = getattr(cls, f_name)
        if isinstance(f_cls, type) and issubclass(f_cls, Field):
            field = f_cls()
            setattr(obj, field.field_name, prop_field(field))
            setattr(field, 'module', obj)
            ls_fields.append(field)
    setattr(obj, 'module_fields', ls_fields)

    # Logger
    setattr(obj, 'logger', logging.getLogger(obj.module_name))
    #setup_logger(obj.logger)

    _lauched_modules.append(obj)
    return obj
class Server:
    def __init__(self, addr, reactor, callback):
        self.addr = addr
        self.reactor = reactor
        self.factory = ServerFactory(callback)
        self.endpoint = UNIXServerEndpoint(reactor, addr)
        self.listen_deferred = self.endpoint.listen(self.factory)
        logging.info("mcmon server listening on {}".format(addr))

    def stop(self):
        # Endpoints have no stop_listening(); the IListeningPort that the
        # listen() Deferred fires with owns the socket, so chain the
        # shutdown off that Deferred.
        self.stop_deferred = self.listen_deferred.addCallback(
            lambda port: port.stopListening())
        return self.stop_deferred
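# A usage sketch for the Server wrapper above (hypothetical names: the
# ServerFactory/callback contract is not shown in this snippet).  Note that
# the socket is owned by the IListeningPort that listen() delivers, which is
# why stop() above goes through the listen Deferred rather than the endpoint.
import logging
from twisted.internet import reactor

def on_event(data):
    logging.info("mcmon event: %r", data)

server = Server("/tmp/mcmon.sock", reactor, on_event)
reactor.callLater(60, server.stop)  # stop listening after one minute
reactor.run()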
def __init__(self, args=None):
    '''Args must be an object with the following attributes:
       foreground, logfile, mailbox, nClients, silent, socketpath,
       verbose.  Suitable defaults will be supplied.'''

    # Pass command line args to ProtocolIVSHMSG, then open logging.
    if args is None:
        args = argparse.Namespace()
    for arg, default in self._required_arg_defaults.items():
        setattr(args, arg, getattr(args, arg, default))

    # Mailbox may be sized above the requested number of clients to
    # satisfy QEMU IVSHMEM restrictions.
    args.server_id = args.nClients + 1
    args.nEvents = args.nClients + 2
    FAMEZ_MailBox(args=args)    # Singleton class, no need to keep the instance

    self.cmdlineargs = args

    if args.foreground:
        TPlog.startLogging(sys.stdout, setStdout=False)
    else:
        PRINT('Logging to %s' % args.logfile)
        TPlog.startLogging(
            DailyLogFile.fromFullPath(args.logfile),
            setStdout=True)

    # "Pass-through" explicit print() for debug.
    args.logmsg = TPlog.msg
    args.logerr = TPlog.err

    # As of Twisted 18, "mode=" is deprecated; the socket should just
    # inherit the sticky bit from the parent directory.  wantPID
    # creates <path>.lock as a symlink to the PID.
    E = UNIXServerEndpoint(
        TIreactor,
        args.socketpath,
        mode=0o666,     # Deprecated at Twisted 18
        wantPID=True)
    E.listen(self)
    args.logmsg('FAME-Z server @%d ready for %d clients on %s' %
                (args.server_id, args.nClients, args.socketpath))
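# The snippets above all follow the same endpoint pattern.  A self-contained
# minimal sketch (assumes nothing beyond Twisted itself): an echo server on a
# UNIX socket, with wantPID=True creating the <path>.lock symlink noted above.
from twisted.internet import protocol, reactor
from twisted.internet.endpoints import UNIXServerEndpoint

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)  # echo bytes straight back

endpoint = UNIXServerEndpoint(reactor, '/tmp/echo.sock', wantPID=True)
endpoint.listen(protocol.Factory.forProtocol(Echo))
reactor.run()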
                    WHERE time < ? AND time > ? ORDER BY time DESC' \
            % (', '.join(fields), table_name)
        result = self.sql_query(query, (time_to, time_from), True)
        out = {'success': False}
        if result:
            out = {}
            out['success'] = True
            out['data'] = [{
                'time': r[0],
                'fields': [{'name': n, 'value': v}
                           for n, v in zip(fields, r[1:])]
            } for r in result]
        out_json = json.dumps(out)
        print out_json
        self.transport.write(out_json)


class DBFactory(Factory):
    def __init__(self, db_filename):
        self.db_filename = db_filename

    def startFactory(self):
        self.db_conn = lite.connect(self.db_filename)

    def stopFactory(self):
        self.db_conn.close()

    def buildProtocol(self, addr):
        return Database(self.db_conn)


db_filename = 'scutu.db'
endpoint = ServerEndpoint(reactor, './scutu.sock')
endpoint.listen(DBFactory(db_filename))
reactor.run()
                    self.current_buffer = self.current_buffer[index + 1:]
                    index = 0
                except ValueError:
                    pass  # Not the right closing brace (nested JSON)
                index = self.current_buffer.find('}', index + 1)


class GProxyFactory(Factory):
    protocol = GProxyProtocol

    def __init__(self, remote):
        self.remote = remote


endpoint = TCP4ServerEndpoint(reactor, 1863)
gfactory = GreenhubFactory()
endpoint.listen(gfactory)

localendpoint = UNIXServerEndpoint(reactor, "/tmp/greenhub.sock")
localendpoint.listen(GProxyFactory(gfactory))

debug("INFO", "Server started.")
reactor.run()
# Give the process its own session under init
os.setsid()

# Final fork
try:
    pid = os.fork()
    if pid > 0:
        pid_file = open(working_dir + '/mailrfd.pid', 'w')
        pid_file.write(str(pid))
        pid_file.close()
        sys.exit()
except OSError, e:
    print e
    sys.exit(1)

if __debug__:
    log.startLogging(sys.stderr)
else:
    syslog.startLogging(prefix='mailrfd', options=LOG_PID, facility=LOG_MAIL)

if __debug__:
    endpoint = TCP4ServerEndpoint(reactor, 8027, interface='localhost')
else:
    endpoint = UNIXServerEndpoint(reactor, working_dir + '/socket')

endpoint.listen(MailRfFactory())
reactor.run()
print "READY" def buildProtocol(self, addr): return TraphProtocol(self.traph) def close(self): self.traph.close() if __name__ == "__main__": sock = sys.argv[1] corpus = sys.argv[2] try: with open(sock+"-options.json") as f: options = json.load(f) except: options = {} traph = TraphServerFactory(corpus, **options) endpoint = UNIXServerEndpoint(reactor, sock) server_listening_deferred = endpoint.listen(traph) @server_listening_deferred.addErrback def server_listening_failed(failure): print failure.value reactor.stop() @server_listening_deferred.addCallback def server_listen_callback(twisted_port): traph.ready() reactor.run()
class IPCMasterService(service.Service, object):
    """
    IPC master service.

    Provides the master side of the IPC communication between the workers.
    """

    UPDATE_INTERVAL = 60  # 60 seconds.
    REMOVE_INTERVAL = 90  # 90 seconds.

    connections = None

    def __init__(self, reactor, workers=None, socket_path=None):
        super(IPCMasterService, self).__init__()
        self.reactor = reactor
        self.workers = workers
        self.socket_path = socket_path
        if self.socket_path is None:
            self.socket_path = get_ipc_socket_path()
        if os.path.exists(self.socket_path):
            os.remove(self.socket_path)
        self.endpoint = UNIXServerEndpoint(reactor, self.socket_path)
        self.port = None
        self.connections = {}
        self.factory = Factory.forProtocol(IPCMaster)
        self.factory.service = self
        self.updateLoop = LoopingCall(self.update)

    @asynchronous
    def startService(self):
        """Start listening on the UNIX socket and create the region
        controller."""
        super(IPCMasterService, self).startService()
        self.starting = self.endpoint.listen(self.factory)

        def save_port(port):
            self.port = port

        @transactional
        def create_region(result):
            RegionController.objects.get_or_create_running_controller()

        def start_update_loop(result):
            self.updateLoopDone = self.updateLoop.start(self.UPDATE_INTERVAL)

        def log_failure(failure):
            if failure.check(CancelledError):
                log.msg("IPCMasterService start-up has been cancelled.")
            else:
                log.err(failure, "IPCMasterService start-up failed.")

        self.starting.addCallback(save_port)
        self.starting.addCallback(partial(deferToDatabase, create_region))
        self.starting.addCallback(start_update_loop)
        self.starting.addErrback(log_failure)

        # Twisted's service framework does not track start-up progress, i.e.
        # it does not check for Deferreds returned by startService(). Here we
        # return a Deferred anyway so that direct callers (esp. those from
        # tests) can easily wait for start-up.
        return self.starting

    @asynchronous
    @inlineCallbacks
    def stopService(self):
        """Stop listening."""
        self.starting.cancel()
        if self.port:
            self.port, port = None, self.port
            yield port.stopListening()
        for data in self.connections.values():
            try:
                yield data['connection'].transport.loseConnection()
            except Exception:
                log.err(None, "Failure when closing IPC connection.")

        @transactional
        def delete_all_processes():
            region = RegionController.objects.get_running_controller()
            region.processes.all().delete()

        @asynchronous
        def stop_update_loop():
            if self.updateLoop.running:
                self.updateLoop.stop()
                return self.updateLoopDone

        yield deferToDatabase(delete_all_processes)
        yield stop_update_loop()
        yield super(IPCMasterService, self).stopService()

    @asynchronous
    def registerWorker(self, pid, conn):
        """Register the worker with `pid` using `conn`."""

        @transactional
        def create_process(pid):
            region = RegionController.objects.get_running_controller()
            process, _ = RegionControllerProcess.objects.get_or_create(
                region=region, pid=pid)
            return (pid, process.id)

        def log_connected(result):
            pid, process_id = result
            log.msg("Worker pid:%d IPC connected." % pid)
            return result

        def add_to_connections(result):
            pid, process_id = result
            self.connections[pid] = {
                'process_id': process_id,
                'connection': conn,
                'rpc': {
                    'port': None,
                    'connections': set(),
                },
            }
            return process_id

        @transactional
        def update_service(process_id):
            region = RegionController.objects.get_running_controller()
            self._updateService(region)
            return process_id

        def return_result(process_id):
            return {
                'process_id': process_id,
            }

        d = deferToDatabase(create_process, pid)
        d.addCallback(log_connected)
        d.addCallback(add_to_connections)
        d.addCallback(partial(deferToDatabase, update_service))
        d.addCallback(return_result)
        return d

    def getPIDFromConnection(self, conn):
        """Get the PID from the connection."""
        for pid, data in self.connections.items():
            if data['connection'] == conn:
                return pid

    @asynchronous
    def unregisterWorker(self, conn, reason):
        """Unregister the worker with `pid` because of `reason`."""
        pid = self.getPIDFromConnection(conn)
        if pid:

            @transactional
            def delete_process(pid):
                process_id = self.connections[pid]['process_id']
                RegionControllerProcess.objects.filter(id=process_id).delete()
                return pid

            def remove_conn_kill_worker(pid):
                del self.connections[pid]
                if self.workers:
                    self.workers.killWorker(pid)
                return pid

            def log_disconnected(pid):
                log.msg("Worker pid:%d IPC disconnected." % pid)

            d = deferToDatabase(delete_process, pid)
            d.addCallback(remove_conn_kill_worker)
            d.addCallback(log_disconnected)
            return d

    def _getListenAddresses(self, port):
        """Return a set of (address, port) tuples for the addresses the
        worker is listening on."""
        addresses = get_all_interface_source_addresses()
        if addresses:
            return set((addr, port) for addr in addresses)
        # There are no non-loopback addresses, so return loopback addresses
        # as a fallback.
        loopback_addresses = set()
        for addr in get_all_interface_addresses():
            ipaddr = IPAddress(addr)
            if ipaddr.is_link_local():
                continue  # Don't advertise link-local addresses.
            if ipaddr.is_loopback():
                loopback_addresses.add((addr, port))
        return loopback_addresses

    @synchronous
    @transactional
    def _updateEndpoints(self, process, addresses):
        """Update the endpoints for `pid` and `port`."""
        previous_endpoint_ids = set(
            RegionControllerProcessEndpoint.objects.filter(
                process=process).values_list("id", flat=True))
        if addresses:
            for addr, port in addresses:
                endpoint, created = (
                    RegionControllerProcessEndpoint.objects.get_or_create(
                        process=process, address=addr, port=port))
                if not created:
                    previous_endpoint_ids.remove(endpoint.id)
        RegionControllerProcessEndpoint.objects.filter(
            id__in=previous_endpoint_ids).delete()

    @synchronous
    def _getProcessObjFor(self, pid):
        """Return the `RegionControllerProcess` for `pid`."""
        process_id = self.connections[pid]['process_id']
        try:
            return RegionControllerProcess.objects.get(id=process_id)
        except RegionControllerProcess.DoesNotExist:
            region_obj = RegionController.objects.get_running_controller()
            return RegionControllerProcess.objects.create(
                id=process_id, region=region_obj, pid=pid)

    @asynchronous
    def registerWorkerRPC(self, pid, port):
        """Register that the worker with `pid` has RPC `port` open."""
        if pid in self.connections:

            @transactional
            def create_endpoints(result):
                pid, port = result
                process = self._getProcessObjFor(pid)
                self._updateEndpoints(
                    process, self._getListenAddresses(port))
                return result

            def set_result(result):
                pid, port = result
                self.connections[pid]['rpc']['port'] = port
                self.connections[pid]['rpc']['connections'] = {}
                return result

            def log_rpc_open(result):
                log.msg(
                    "Worker pid:%d opened RPC listener on port:%s." % result)

            d = deferToDatabase(create_endpoints, (pid, port))
            d.addCallback(set_result)
            d.addCallback(log_rpc_open)
            return d

    @synchronous
    @transactional
    def _registerConnection(self, process, ident, host, port,
                            force_save=True):
        rackd = RackController.objects.get(system_id=ident)
        endpoint, _ = RegionControllerProcessEndpoint.objects.get_or_create(
            process=process, address=host, port=port)
        connection, created = RegionRackRPCConnection.objects.get_or_create(
            endpoint=endpoint, rack_controller=rackd)
        if not created and force_save:
            # Force the save so that signals connected to the
            # RegionRackRPCConnection are performed.
            connection.save(force_update=True)
        return connection

    def registerWorkerRPCConnection(self, pid, connid, ident, host, port):
        """Register that the worker with `pid` has an RPC connection."""
        if pid in self.connections:

            @transactional
            def register_connection(pid, connid, ident, host, port):
                process = self._getProcessObjFor(pid)
                self._registerConnection(process, ident, host, port)
                return (pid, connid, ident, host, port)

            def log_connection(result):
                pid, conn = result[0], result[1:]
                log.msg(
                    "Worker pid:%d registered RPC connection to %s." % (
                        pid, conn[1:]))
                return conn

            def set_result(conn):
                connid, conn = conn[0], conn[1:]
                self.connections[pid]['rpc']['connections'][connid] = conn

            d = deferToDatabase(
                register_connection, pid, connid, ident, host, port)
            d.addCallback(log_connection)
            d.addCallback(set_result)
            return d

    @transactional
    def _unregisterConnection(self, process, ident, host, port):
        """Unregister the connection from the database."""
        try:
            endpoint = RegionControllerProcessEndpoint.objects.get(
                process=process, address=host, port=port)
        except RegionControllerProcessEndpoint.DoesNotExist:
            # Endpoint no longer exists, nothing to do.
            pass
        else:
            try:
                rackd = RackController.objects.get(system_id=ident)
            except RackController.DoesNotExist:
                # No rack controller, nothing to do.
                pass
            else:
                RegionRackRPCConnection.objects.filter(
                    endpoint=endpoint, rack_controller=rackd).delete()

    def unregisterWorkerRPCConnection(self, pid, connid):
        """Unregister the connection for the worker with `pid`."""
        if pid in self.connections:
            connections = self.connections[pid]['rpc']['connections']
            conn = connections.get(connid, None)
            if conn is not None:

                @transactional
                def unregister_connection(pid, connid, ident, host, port):
                    process = self._getProcessObjFor(pid)
                    self._unregisterConnection(process, ident, host, port)
                    return (pid, connid, ident, host, port)

                def log_disconnect(result):
                    pid, conn = result[0], result[1:]
                    log.msg(
                        "Worker pid:%d lost RPC connection to %s." % (
                            pid, conn[1:]))
                    return conn

                def set_result(conn):
                    connid = conn[0]
                    connections.pop(connid, None)

                d = deferToDatabase(
                    unregister_connection, pid, connid, *conn)
                d.addCallback(log_disconnect)
                d.addCallback(set_result)
                return d

    @synchronous
    def _updateConnections(self, process, connections):
        """Update the existing RPC connections into this region.

        This is needed because the database can get into an incorrect state
        when another process removes its references from the database, and
        the existing connections then need to be re-created.
        """
        if not connections:
            RegionRackRPCConnection.objects.filter(
                endpoint__process=process).delete()
        else:
            previous_connection_ids = set(
                RegionRackRPCConnection.objects.filter(
                    endpoint__process=process).values_list("id", flat=True))
            for _, (ident, host, port) in connections.items():
                db_conn = self._registerConnection(
                    process, ident, host, port, force_save=False)
                previous_connection_ids.discard(db_conn.id)
            if previous_connection_ids:
                RegionRackRPCConnection.objects.filter(
                    id__in=previous_connection_ids).delete()

    @synchronous
    def _updateService(self, region_obj):
        """Update the service status for this region."""
        Service.objects.create_services_for(region_obj)
        number_of_processes = len(self.connections)
        not_running_count = workers.MAX_WORKERS_COUNT - number_of_processes
        if not_running_count > 0:
            if number_of_processes == 1:
                process_text = "process"
            else:
                process_text = "processes"
            Service.objects.update_service_for(
                region_obj, "regiond", SERVICE_STATUS.DEGRADED,
                "%d %s running but %d were expected." % (
                    number_of_processes, process_text,
                    workers.MAX_WORKERS_COUNT))
        else:
            Service.objects.update_service_for(
                region_obj, "regiond", SERVICE_STATUS.RUNNING, "")

    @synchronous
    @transactional
    def _update(self):
        """Repopulate the database with process, endpoint, and connection
        information."""
        # Get the region controller and update its hostname and last
        # updated time.
        region_obj = RegionController.objects.get_running_controller()
        hostname = gethostname()
        if region_obj.hostname != hostname:
            region_obj.hostname = hostname
            region_obj.save()

        # Get all the existing processes for the region controller. This is
        # used to remove the old processes that we did not update.
        previous_process_ids = set(
            RegionControllerProcess.objects.filter(
                region=region_obj).values_list("id", flat=True))

        # Loop through all the current workers to update the records in the
        # database. Caution is needed because other region controllers can
        # remove expired processes.
        for pid, conn in self.connections.items():
            process = self._getProcessObjFor(pid)
            process.updated = now()
            process.save()
            if conn['rpc']['port']:
                # Update the endpoints for the provided port.
                self._updateEndpoints(
                    process,
                    self._getListenAddresses(conn['rpc']['port']))
            else:
                # RPC is not running, no endpoints.
                self._updateEndpoints(process, [])
            self._updateConnections(process, conn['rpc']['connections'])
            previous_process_ids.discard(process.id)

        # Delete all the old processes that are dead.
        if previous_process_ids:
            RegionControllerProcess.objects.filter(
                id__in=previous_process_ids).delete()

        # Remove any old processes not owned by this controller. Every
        # controller should update its processes based on the
        # `UPDATE_INTERVAL`; any that are older than `REMOVE_INTERVAL`
        # are dropped.
        remove_before_time = now() - timedelta(seconds=self.REMOVE_INTERVAL)
        RegionControllerProcess.objects.exclude(region=region_obj).filter(
            updated__lte=remove_before_time).delete()

        # Update the status of this regiond service for this region based on
        # the number of running processes.
        self._updateService(region_obj)

        # Update the status of all regions that have no processes running.
        for other_region in RegionController.objects.exclude(
                system_id=region_obj.id).prefetch_related("processes"):
            # Use len() with `all` so the prefetch cache is used.
            if len(other_region.processes.all()) == 0:
                Service.objects.mark_dead(other_region, dead_region=True)

    @asynchronous
    def update(self):

        def ignore_cancel(failure):
            failure.trap(CancelledError)

        d = deferToDatabase(self._update)
        d.addErrback(ignore_cancel)
        d.addErrback(
            log.err,
            "Failed to update regiond's processes and endpoints; "
            "%s records may be out of date" % (eventloop.loop.name,))
        return d
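# A hedged sketch of wiring IPCMasterService into a Twisted service tree
# (illustrative only; the real project has its own service wiring, and the
# socket path here is hypothetical).  service.Service subclasses start when
# their parent MultiService starts.
from twisted.application import service
from twisted.internet import reactor

master = IPCMasterService(reactor, workers=None,
                          socket_path='/tmp/regiond-ipc.sock')
parent = service.MultiService()
master.setServiceParent(parent)
parent.startService()   # invokes IPCMasterService.startService()
reactor.run()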
# Disable running the uncompressed soxy; it is no longer needed.
#args = ['./soxy/soxy', master_config.get('cert'), master_config.get('key'),
#        str(master_config.getint('port')), os.getcwd() + '/collect-master.sock']
#logging.debug('Starting proxy with: %s', args)
#reactor.spawnProcess(Socat(), './soxy/soxy', args=args, env=os.environ)

args = [
    './soxy/soxy',
    master_config.get('cert'),
    master_config.get('key'),
    master_config.get('ca'),
    str(master_config.getint('port_compression')),
    os.getcwd() + '/collect-master.sock',
    'compress',
]
logging.debug('Starting proxy with: %s', args)
reactor.spawnProcess(Socat(), './soxy/soxy', args=args, env=os.environ)

endpoint.listen(
    ClientFactory(plugins,
                  frozenset(master_config.get('fastpings').split())))
logging.info('Init done')

reactor.run()

logging.info('Finishing up')
pool.stop()
if socat:
    soc = socat
    socat = None
    soc.signalProcess('TERM')
activity.shutdown()
logging.info('Shutdown done')
class IPCMasterService(service.Service, object):
    """
    IPC master service.

    Provides the master side of the IPC communication between the workers.
    """

    connections = None

    def __init__(self, reactor, workers=None, socket_path=None):
        super(IPCMasterService, self).__init__()
        self.reactor = reactor
        self.workers = workers
        self.socket_path = socket_path
        if self.socket_path is None:
            self.socket_path = get_ipc_socket_path()
        if os.path.exists(self.socket_path):
            os.remove(self.socket_path)
        self.endpoint = UNIXServerEndpoint(reactor, self.socket_path)
        self.port = None
        self.connections = {}
        self.factory = Factory.forProtocol(IPCMaster)
        self.factory.service = self

    @asynchronous
    def startService(self):
        """Start listening on the UNIX socket."""
        super(IPCMasterService, self).startService()
        self.starting = self.endpoint.listen(self.factory)

        def save_port(port):
            self.port = port

        def log_failure(failure):
            if failure.check(CancelledError):
                log.msg("IPCMasterService start-up has been cancelled.")
            else:
                log.err(failure, "IPCMasterService start-up failed.")

        self.starting.addCallback(save_port)
        self.starting.addErrback(log_failure)

        # Twisted's service framework does not track start-up progress, i.e.
        # it does not check for Deferreds returned by startService(). Here we
        # return a Deferred anyway so that direct callers (esp. those from
        # tests) can easily wait for start-up.
        return self.starting

    @asynchronous
    @inlineCallbacks
    def stopService(self):
        """Stop listening."""
        self.starting.cancel()
        if self.port:
            self.port, port = None, self.port
            yield port.stopListening()
        for conn in self.connections.values():
            try:
                yield conn.transport.loseConnection()
            except Exception:
                log.err(None, "Failure when closing IPC connection.")
        yield super(IPCMasterService, self).stopService()

    def registerWorker(self, pid, conn):
        """Register the worker with `pid` using `conn`."""
        self.connections[pid] = conn
        log.msg("Worker pid:%d IPC connected." % pid)

    def getPIDFromConnection(self, conn):
        """Get the PID from the connection."""
        for pid, reg in self.connections.items():
            if reg == conn:
                return pid

    def unregisterWorker(self, conn, reason):
        """Unregister the worker for `conn` because of `reason`."""
        pid = self.getPIDFromConnection(conn)
        if pid:
            del self.connections[pid]
            log.msg("Worker pid:%d IPC disconnected." % pid)
            if self.workers:
                self.workers.killWorker(pid)
class SnifferGateway(object):
    def __init__(self, sock_name):
        self.server = UNIXServerEndpoint(reactor, sock_name)
        self.server.listen(RequestFactory())
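# SnifferGateway presupposes a RequestFactory that is not shown in this
# snippet.  A hypothetical minimal stand-in, purely to make the example
# self-contained: a line-based protocol that acknowledges each request.
from twisted.internet import protocol
from twisted.protocols import basic

class RequestProtocol(basic.LineReceiver):
    delimiter = b'\n'

    def lineReceived(self, line):
        self.sendLine(b'ACK ' + line)  # acknowledge every request line

class RequestFactory(protocol.Factory):
    protocol = RequestProtocol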
            reactor.stop()
            # Don't report a lost proxy if we're already terminating.
            logging.fatal('Lost proxy, terminating')
        except ReactorNotRunning:
            pass

    def errReceived(self, data):
        logging.warn('Proxy complained: %s', data)


# Disable running the uncompressed soxy; it is no longer needed.
#args = ['./soxy/soxy', master_config.get('cert'), master_config.get('key'),
#        str(master_config.getint('port')), os.getcwd() + '/collect-master.sock']
#logging.debug('Starting proxy with: %s', args)
#reactor.spawnProcess(Socat(), './soxy/soxy', args=args, env=os.environ)

args = ['./soxy/soxy', master_config.get('cert'), master_config.get('key'),
        str(master_config.getint('port_compression')),
        os.getcwd() + '/collect-master.sock', 'compress']
logging.debug('Starting proxy with: %s', args)
reactor.spawnProcess(Socat(), './soxy/soxy', args=args, env=os.environ)

endpoint.listen(
    ClientFactory(plugins,
                  frozenset(master_config.get('fastpings').split())))
logging.info('Init done')

reactor.run()

logging.info('Finishing up')
pool.stop()
if socat:
    soc = socat
    socat = None
    soc.signalProcess('TERM')
activity.shutdown()
logging.info('Shutdown done')
            ## create the endpoint
            ##
            server = UNIXServerEndpoint(reactor, path, backlog=backlog)

        else:
            raise ApplicationError(
                "crossbar.error.invalid_configuration",
                "invalid endpoint type '{}'".format(endpoint_config["type"]),
            )

    except Exception as e:
        log.msg("endpoint creation failed: {}".format(e))
        raise e

    d = server.listen(transport_factory)

    def ok(port):
        router.transport_no += 1
        router.transports[router.transport_no] = RouterTransport(
            router.transport_no, config, port)
        return router.transport_no

    def fail(err):
        log.msg("cannot listen on endpoint: {}".format(err.value))
        raise ApplicationError("crossbar.error.cannotlisten", str(err.value))

    d.addCallbacks(ok, fail)
    return d

else:
    # http://stackoverflow.com/questions/12542700/setsockopt-before-connect-for-reactor-connecttcp