Code example #1
    def __init__(self, args=None):
        '''Args must be an object with the following attributes:
           foreground, logfile, mailbox, nClients, silent, socketpath, verbose
           Suitable defaults will be supplied.'''

        # Pass command line args to ProtocolIVSHMSG, then open logging.
        if args is None:
            args = argparse.Namespace()
        for arg, default in self._required_arg_defaults.items():
            setattr(args, arg, getattr(args, arg, default))

        # Mailbox may be sized above the requested number of clients to
        # satisfy QEMU IVSHMEM restrictions.
        args.server_id = args.nClients + 1
        args.nEvents = args.nClients + 2

        # It's a singleton, so there's no reason to keep the instance; however,
        # that's the way I wrote the Klein API server, so...
        mb = MB(args=args)
        MailBoxReSTAPI(mb)
        shutdown_http_logging()

        if args.foreground:
            if args.verbose > 1:
                TPlog.startLogging(sys.stdout, setStdout=False)
            else:
                TPlog.startLogging(open('/dev/null', 'a'), setStdout=False)
        else:
            PRINT('Logging to %s' % args.logfile)
            TPlog.startLogging(
                DailyLogFile.fromFullPath(args.logfile),
                setStdout=True)  # "Pass-through" explicit print() for debug
        args.logmsg = TPlog.msg
        args.logerr = TPlog.err

        # By Twisted version 18, "mode=" is deprecated and you should just
        # inherit the tacky bit from the parent directory.  wantPID creates
        # <path>.lock as a symlink to "PID".
        E = UNIXServerEndpoint(
            TIreactor,
            args.socketpath,
            mode=0o666,  # Deprecated at Twisted 18
            wantPID=True)
        E.listen(self)
        args.logmsg(
            '%s server @%d ready for %d clients on %s' %
            (args.title, args.server_id, args.nClients, args.socketpath))

        # https://stackoverflow.com/questions/1411281/twisted-listen-to-multiple-ports-for-multiple-processes-with-one-reactor

        # Voodoo kick to a) set up the one-time SI and b) set up the commander.
        # The docs mislead: you have to explicitly pass something to get
        # persistent state across protocol/transport invocations.  As there is
        # only one server object per process instantiation, that's not necessary.

        protobj = ProtocolIVSHMSGServer(self, args)  # With "args"
        Commander(protobj)
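
The comments above note that the mode= argument to UNIXServerEndpoint is deprecated around Twisted 18. A minimal sketch of one workaround, assuming stock Twisted (the protocol class and socket path below are placeholders, not part of this project): create the endpoint without mode= and widen the socket's permissions with os.chmod once listen() has fired.

import os
from twisted.internet import reactor
from twisted.internet.endpoints import UNIXServerEndpoint
from twisted.internet.protocol import Factory, Protocol

SOCKET_PATH = '/tmp/example.sock'  # placeholder path

class Echo(Protocol):  # placeholder protocol, for illustration only
    def dataReceived(self, data):
        self.transport.write(data)

def _loosen_permissions(port):
    # Roughly what mode=0o666 used to do, applied after the socket exists.
    os.chmod(SOCKET_PATH, 0o666)
    return port

# No mode= argument; wantPID still creates <path>.lock as described above.
endpoint = UNIXServerEndpoint(reactor, SOCKET_PATH, wantPID=True)
endpoint.listen(Factory.forProtocol(Echo)).addCallback(_loosen_permissions)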
Code example #2
File: mon.py Project: gregwill/mc
class Server:
    def __init__(self, addr, reactor, callback):
        self.addr = addr
        self.reactor = reactor
        self.factory = ServerFactory(callback)
        self.endpoint = UNIXServerEndpoint(reactor, addr)
        self.listen_deferred = self.endpoint.listen(self.factory)
        logging.info("mcmon server listening on {}".format(addr))

    def stop(self):
        self.stop_deferred = self.endpoint.stop_listening()
        return self.stop_deferred
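
In stock Twisted, UNIXServerEndpoint does not expose a stop method of its own; shutdown normally goes through the IListeningPort that listen() eventually fires with. A rough sketch of that pattern (illustrative names, not taken from the mcmon project):

from twisted.internet import reactor
from twisted.internet.endpoints import UNIXServerEndpoint

class StoppableServer(object):
    def __init__(self, addr, factory):
        self._port = None
        d = UNIXServerEndpoint(reactor, addr).listen(factory)
        d.addCallback(self._listening)

    def _listening(self, port):
        self._port = port  # an IListeningPort
        return port

    def stop(self):
        # stopListening() returns a Deferred that fires when the socket closes.
        if self._port is not None:
            return self._port.stopListening()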
Code example #3
File: ipc.py Project: laoyin/maas
 def __init__(self, reactor, workers=None, socket_path=None):
     super(IPCMasterService, self).__init__()
     self.reactor = reactor
     self.workers = workers
     self.socket_path = socket_path
     if self.socket_path is None:
         self.socket_path = get_ipc_socket_path()
     if os.path.exists(self.socket_path):
         os.remove(self.socket_path)
     self.endpoint = UNIXServerEndpoint(reactor, self.socket_path)
     self.port = None
     self.connections = {}
     self.factory = Factory.forProtocol(IPCMaster)
     self.factory.service = self
Code example #4
 def __init__(self, name, socket_file, fields):
     """
     Construct the module with its *name*, and its *fields*. Also try to
     create a socket unix server endpoint on *socket_file* path.
     """
     threading.Thread.__init__(self)
     self.name = name
     self.fields = fields
     self.ready_file = path.ready_file(self.name)
     self.exitcode = self.SUCCESS_EXIT
     if os.path.exists(socket_file):
         os.remove(socket_file)
     endpoint = ServerEndpoint(reactor, socket_file)
     endpoint.listen(connection.Factory(self))
Code example #5
 def server(self, reactor):
     """
     Construct a UNIX server endpoint.
     """
     # self.mktemp() often returns a path which is too long to be used.
     path = mktemp(suffix='.sock', dir='.')
     return UNIXServerEndpoint(reactor, path)
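
UNIX socket paths are limited to roughly 100 bytes (the kernel's sun_path buffer), which is why the test above builds a short path instead of using self.mktemp(). A small illustrative sketch, not from the original test suite, of exercising such an endpoint end to end:

from twisted.internet import reactor
from twisted.internet.endpoints import (
    UNIXClientEndpoint, UNIXServerEndpoint, connectProtocol)
from twisted.internet.protocol import Factory, Protocol

path = './test.sock'  # placeholder; keep it short because of sun_path limits
server = UNIXServerEndpoint(reactor, path)
d = server.listen(Factory.forProtocol(Protocol))
# Once listening, connect a throwaway client protocol over the same socket.
d.addCallback(lambda port: connectProtocol(UNIXClientEndpoint(reactor, path),
                                           Protocol()))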
Code example #6
File: plugin.py Project: tjjh89017/maas
    def _makeTFTPService(self, tftp_root, tftp_port, rpc_service):
        """Create the dynamic TFTP service."""
        from provisioningserver.rackdservices.tftp import TFTPService
        tftp_service = TFTPService(resource_root=tftp_root,
                                   port=tftp_port,
                                   client_service=rpc_service)
        tftp_service.setName("tftp")

        # *** EXPERIMENTAL ***
        # https://code.launchpad.net/~allenap/maas/tftp-offload/+merge/312146
        # If the TFTP port has been set to zero, use the experimental offload
        # service. Otherwise stick to the normal in-process TFTP service.
        if tftp_port == 0:
            from provisioningserver.path import get_data_path
            from provisioningserver.rackdservices import tftp_offload
            from twisted.internet.endpoints import UNIXServerEndpoint
            tftp_offload_socket = get_data_path(
                "/var/lib/maas/tftp-offload.sock")
            tftp_offload_endpoint = UNIXServerEndpoint(reactor,
                                                       tftp_offload_socket,
                                                       wantPID=False)
            tftp_offload_service = tftp_offload.TFTPOffloadService(
                reactor, tftp_offload_endpoint, tftp_service.backend)
            tftp_offload_service.setName("tftp-offload")
            return tftp_offload_service
        # *** /EXPERIMENTAL ***

        return tftp_service
Code example #7
File: __init__.py Project: TurpIF/KHome
    def __call__(self, *args, **kwargs):
        obj = super(BaseMeta, self).__call__(*args, **kwargs)
        cls = type(obj)

        # Handle module name
        if not hasattr(obj, 'module_name'):
            if not hasattr(cls, 'module_name'):
                setattr(obj, 'module_name', cls.__name__)
            else:
                setattr(obj, 'module_name', cls.module_name)

        if obj.module_name in type(self).ls_name:
            raise NameError('Module with same name already exist')
        type(self).ls_name.add(obj.module_name)

        # Handle module socket (server side)
        setattr(obj, 'module_socket', path.socket_file(obj.module_name))
        try:
            os.remove(obj.module_socket)
        except OSError:
            pass
        endpoint = ServerEndpoint(reactor, obj.module_socket)
        endpoint.listen(connection.Factory(obj))

        # Handle module fields
        from khome.fields import Base as Field
        ls_fields = []
        for f_cls in cls.__dict__.keys():
            f_cls = getattr(cls, f_cls)
            if isinstance(f_cls, type) and issubclass(f_cls, Field):
                field = f_cls()
                setattr(obj, field.field_name, prop_field(field))
                setattr(field, 'module', obj)
                ls_fields += [field]
        setattr(obj, 'module_fields', ls_fields)

        # Logger
        setattr(obj, 'logger', logging.getLogger(obj.module_name))
        #setup_logger(obj.logger)

        _lauched_modules.append(obj)
        return obj
Code example #8
File: twisted_server.py Project: zvolchak/FAME-Z
    def __init__(self, args=None):
        '''Args must be an object with the following attributes:
           foreground, logfile, mailbox, nClients, silent, socketpath, verbose
           Suitable defaults will be supplied.'''

        # Pass command line args to ProtocolIVSHMSG, then open logging.
        if args is None:
            args = argparse.Namespace()
        for arg, default in self._required_arg_defaults.items():
            setattr(args, arg, getattr(args, arg, default))

        # Mailbox may be sized above the requested number of clients to
        # satisfy QEMU IVSHMEM restrictions.
        args.server_id = args.nClients + 1
        args.nEvents = args.nClients + 2
        FAMEZ_MailBox(args=args)  # singleton class, no need to keep instance

        self.cmdlineargs = args
        if args.foreground:
            TPlog.startLogging(sys.stdout, setStdout=False)
        else:
            PRINT('Logging to %s' % args.logfile)
            TPlog.startLogging(
                DailyLogFile.fromFullPath(args.logfile),
                setStdout=True)  # "Pass-through" explicit print() for debug
        args.logmsg = TPlog.msg
        args.logerr = TPlog.err

        # By Twisted version 18, "mode=" is deprecated and you should just
        # inherit the tacky bit from the parent directory.  wantPID creates
        # <path>.lock as a symlink to "PID".
        E = UNIXServerEndpoint(
            TIreactor,
            args.socketpath,
            mode=0o666,  # Deprecated at Twisted 18
            wantPID=True)
        E.listen(self)
        args.logmsg('FAME-Z server @%d ready for %d clients on %s' %
                    (args.server_id, args.nClients, args.socketpath))
Code example #9
File: endpoint.py Project: touilleMan/crossbar
def create_listening_endpoint_from_config(config, cbdir, reactor, log):
    """
    Create a Twisted stream server endpoint from a Crossbar.io transport configuration.

    See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamServerEndpoint.html

    :param config: The transport configuration.
    :type config: dict
    :param cbdir: Crossbar.io node directory (we need this for TLS key/certificates).
    :type cbdir: str
    :param reactor: The reactor to use for endpoint creation.
    :type reactor: obj

    :returns obj -- An instance implementing IStreamServerEndpoint
    """
    endpoint = None

    # a TCP endpoint
    #
    if config['type'] == 'tcp':

        # the TCP protocol version (v4 or v6)
        #
        version = int(config.get('version', 4))

        # the listening port
        if type(config['port']) is six.text_type:
            # read port from environment variable ..
            try:
                port = int(environ[config['port'][1:]])
            except Exception as e:
                log.warn(
                    "Could not read listening port from env var: {}".format(e))
                raise e
        else:
            port = config['port']

        # the listening interface
        #
        interface = str(config.get('interface', '').strip())

        # the TCP accept queue depth
        #
        backlog = int(config.get('backlog', 50))

        if 'tls' in config:
            # create a TLS server endpoint
            #
            if _HAS_TLS:
                # TLS server context
                context = _create_tls_server_context(config['tls'], cbdir, log)

                if version == 4:
                    endpoint = SSL4ServerEndpoint(reactor,
                                                  port,
                                                  context,
                                                  backlog=backlog,
                                                  interface=interface)
                elif version == 6:
                    raise Exception("TLS on IPv6 not implemented")
                else:
                    raise Exception(
                        "invalid TCP protocol version {}".format(version))
            else:
                raise Exception(
                    "TLS transport requested, but TLS packages not available:\n{}"
                    .format(_LACKS_TLS_MSG))

        else:
            # create a non-TLS server endpoint
            #
            if version == 4:
                endpoint = TCP4ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            elif version == 6:
                endpoint = TCP6ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            else:
                raise Exception(
                    "invalid TCP protocol version {}".format(version))

    # a Unix Domain Socket endpoint
    #
    elif config['type'] == 'unix':

        # the accept queue depth
        #
        backlog = int(config.get('backlog', 50))

        # the path
        #
        path = FilePath(join(cbdir, config['path']))

        # if there is already something there, delete it.
        #
        if path.exists():
            log.info(("{path} exists, attempting to remove before using as a "
                      "UNIX socket"),
                     path=path)
            path.remove()

        # create the endpoint
        #
        endpoint = UNIXServerEndpoint(reactor, path.path, backlog=backlog)

    else:
        raise Exception("invalid endpoint type '{}'".format(config['type']))

    return endpoint
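
A hypothetical call into the helper above for its UNIX-socket branch; the keys mirror what the function actually reads ('type', 'path', optional 'backlog'), while cbdir, reactor, log and transport_factory are assumed to come from the surrounding Crossbar.io node code:

config = {
    'type': 'unix',
    'path': 'node.sock',  # resolved relative to cbdir inside the function
    'backlog': 100,
}
endpoint = create_listening_endpoint_from_config(config, cbdir, reactor, log)
listening = endpoint.listen(transport_factory)  # Deferred firing with the port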
Code example #10
File: collect-master.py Project: bittomix/ucollect
logging.basicConfig(level=severity, format=master_config.get('log_format'))
if log_file != '-':
	handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=int(master_config.get('log_file_size')), backupCount=int(master_config.get('log_file_count')))
	handler.setFormatter(logging.Formatter(fmt=master_config.get('log_format')))
	logging.getLogger().addHandler(handler)

loaded_plugins = {}
plugins = Plugins()
for (plugin, config) in master_config.plugins().items():
	(modulename, classname) = plugin.rsplit('.', 1)
	module = importlib.import_module(modulename)
	constructor = getattr(module, classname)
	loaded_plugins[plugin] = constructor(plugins, config)
	logging.info('Loaded plugin %s from %s', loaded_plugins[plugin].name(), plugin)
# Some configuration, to load the port from?
endpoint = UNIXServerEndpoint(reactor, './collect-master.sock')

socat = None

class Socat(protocol.ProcessProtocol):
	def connectionMade(self):
		global socat
		socat = self.transport
		logging.info('Started proxy')

	def processEnded(self, status):
		global socat
		if socat:
			socat = None
			try:
				reactor.stop()
Code example #11
File: cloudserver.py Project: Hexaflou/GreenHub
                self.current_buffer = self.current_buffer[index + 1:]

                index = 0

            except ValueError:
                pass  # Not the right closing brace (nested JSON)

            index = self.current_buffer.find('}', index + 1)

        pass

    pass

class GProxyFactory(Factory):
    protocol = GProxyProtocol

    def __init__(self, remote):
        self.remote = remote
        pass

endpoint = TCP4ServerEndpoint(reactor, 1863)
gfactory = GreenhubFactory()
endpoint.listen(gfactory)

localendpoint = UNIXServerEndpoint(reactor, "/tmp/greenhub.sock")
localendpoint.listen(GProxyFactory(gfactory))

debug("INFO", "Serveur démarré.")
reactor.run()
Code example #12
File: mailrfd.py Project: justinjereza/MailRF
        # Give the process its own session under init
        os.setsid()

        # Final fork
        try:
            pid = os.fork()
            if pid > 0:
                pid_file = open(working_dir + '/mailrfd.pid', 'w')
                pid_file.write(str(pid))
                pid_file.close()
                sys.exit()
        except OSError, e:
            print e
            sys.exit(1)

    if __debug__:
        log.startLogging(sys.stderr)
    else:
        syslog.startLogging(prefix='mailrfd',
                            options=LOG_PID,
                            facility=LOG_MAIL)

    if __debug__:
        endpoint = TCP4ServerEndpoint(reactor, 8027, interface='localhost')
    else:
        endpoint = UNIXServerEndpoint(reactor, working_dir + '/socket')

    endpoint.listen(MailRfFactory())
    reactor.run()
Code example #13
def create_listening_endpoint_from_config(config, cbdir, reactor):
    """
    Create a Twisted stream server endpoint from a Crossbar.io transport configuration.

    See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamServerEndpoint.html

    :param config: The transport configuration.
    :type config: dict
    :param cbdir: Crossbar.io node directory (we need this for TLS key/certificates).
    :type cbdir: str
    :param reactor: The reactor to use for endpoint creation.
    :type reactor: obj

    :returns obj -- An instance implementing IStreamServerEndpoint
    """
    log = make_logger()
    endpoint = None

    # a TCP endpoint
    #
    if config['type'] == 'tcp':

        # the TCP protocol version (v4 or v6)
        #
        version = int(config.get('version', 4))

        # the listening port
        #
        if type(config['port']) is six.text_type:
            # read port from environment variable ..
            try:
                port = int(environ[config['port'][1:]])
            except Exception as e:
                print(
                    "Could not read listening port from env var: {}".format(e))
                raise e
        else:
            port = config['port']

        # the listening interface
        #
        interface = str(config.get('interface', '').strip())

        # the TCP accept queue depth
        #
        backlog = int(config.get('backlog', 50))

        if 'tls' in config:
            if _HAS_TLS:
                key_filepath = abspath(join(cbdir, config['tls']['key']))
                cert_filepath = abspath(
                    join(cbdir, config['tls']['certificate']))

                with open(key_filepath) as key_file:
                    with open(cert_filepath) as cert_file:

                        if 'dhparam' in config['tls']:
                            dhpath = FilePath(
                                abspath(join(cbdir, config['tls']['dhparam'])))
                            dh_params = DiffieHellmanParameters.fromFile(
                                dhpath)
                        else:
                            # XXX won't be doing ANY EDH
                            # curves... maybe make dhparam required?
                            # or do "whatever tlxctx was doing"
                            dh_params = None
                            log.warn(
                                "OpenSSL DH modes not active (no 'dhparam')")

                        # create a TLS context factory
                        #
                        key = key_file.read()
                        cert = cert_file.read()
                        ca_certs = None
                        if 'ca_certificates' in config['tls']:
                            ca_certs = []
                            for fname in config['tls']['ca_certificates']:
                                with open(fname, 'r') as f:
                                    ca_certs.append(
                                        Certificate.loadPEM(f.read()).original)

                        crossbar_ciphers = AcceptableCiphers.fromOpenSSLCipherString(
                            'ECDHE-RSA-AES128-GCM-SHA256:'
                            'DHE-RSA-AES128-GCM-SHA256:'
                            'ECDHE-RSA-AES128-SHA256:'
                            'DHE-RSA-AES128-SHA256:'
                            'ECDHE-RSA-AES128-SHA:'
                            'DHE-RSA-AES128-SHA')

                        ctx = CertificateOptions(
                            privateKey=KeyPair.load(
                                key, crypto.FILETYPE_PEM).original,
                            certificate=Certificate.loadPEM(cert).original,
                            verify=(ca_certs is not None),
                            caCerts=ca_certs,
                            dhParameters=dh_params,
                            acceptableCiphers=crossbar_ciphers,
                        )
                        if ctx._ecCurve is None:
                            log.warn(
                                "OpenSSL failed to set ECDH default curve")
                        else:
                            log.info(
                                "Ok, OpenSSL is using ECDH elliptic curve {curve}",
                                curve=ctx._ecCurve.snName,
                            )

                # create a TLS server endpoint
                #
                if version == 4:
                    endpoint = SSL4ServerEndpoint(reactor,
                                                  port,
                                                  ctx,
                                                  backlog=backlog,
                                                  interface=interface)
                elif version == 6:
                    raise Exception("TLS on IPv6 not implemented")
                else:
                    raise Exception(
                        "invalid TCP protocol version {}".format(version))
            else:
                raise Exception(
                    "TLS transport requested, but TLS packages not available:\n{}"
                    .format(_LACKS_TLS_MSG))

        else:
            # create a non-TLS server endpoint
            #
            if version == 4:
                endpoint = TCP4ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            elif version == 6:
                endpoint = TCP6ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            else:
                raise Exception(
                    "invalid TCP protocol version {}".format(version))

    # a Unix Domain Socket endpoint
    #
    elif config['type'] == 'unix':

        # the accept queue depth
        #
        backlog = int(config.get('backlog', 50))

        # the path
        #
        path = FilePath(join(cbdir, config['path']))

        # if there is already something there, delete it.
        #
        if path.exists():
            log.info(("{path} exists, attempting to remove before using as a "
                      "UNIX socket"),
                     path=path)
            path.remove()

        # create the endpoint
        #
        endpoint = UNIXServerEndpoint(reactor, path.path, backlog=backlog)

    else:
        raise Exception("invalid endpoint type '{}'".format(config['type']))

    return endpoint
Code example #14
File: server.py Project: medialab/hyphe
        print "READY"

    def buildProtocol(self, addr):
        return TraphProtocol(self.traph)

    def close(self):
        self.traph.close()

if __name__ == "__main__":
    sock = sys.argv[1]
    corpus = sys.argv[2]
    try:
        with open(sock+"-options.json") as f:
            options = json.load(f)
    except:
        options = {}
    traph = TraphServerFactory(corpus, **options)
    endpoint = UNIXServerEndpoint(reactor, sock)
    server_listening_deferred = endpoint.listen(traph)

    @server_listening_deferred.addErrback
    def server_listening_failed(failure):
        print failure.value
        reactor.stop()

    @server_listening_deferred.addCallback
    def server_listen_callback(twisted_port):
        traph.ready()

    reactor.run()
Code example #15
 def __init__(self, sock_name):
     self.server = UNIXServerEndpoint(reactor, sock_name)
     self.server.listen(RequestFactory())
Code example #16
                'time':
                r[0],
                'fields': [{
                    'name': n,
                    'value': v
                } for n, v in zip(fields, r[1:])]
            } for r in result]
        out_json = json.dumps(out)
        print out_json
        self.transport.write(out_json)


class DBFactory(Factory):
    def __init__(self, db_filename):
        self.db_filename = db_filename

    def startFactory(self):
        self.db_conn = lite.connect(self.db_filename)

    def stopFactory(self):
        self.db_conn.close()

    def buildProtocol(self, addr):
        return Database(self.db_conn)


db_filename = 'scutu.db'
endpoint = ServerEndpoint(reactor, './scutu.sock')
endpoint.listen(DBFactory(db_filename))
reactor.run()
Code example #17
File: endpoint.py Project: digulla/crossbar
def create_listening_endpoint_from_config(config, cbdir, reactor):
    """
   Create a Twisted stream server endpoint from a Crossbar.io transport configuration.

   See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamServerEndpoint.html

   :param config: The transport configuration.
   :type config: dict
   :param cbdir: Crossbar.io node directory (we need this for TLS key/certificates).
   :type cbdir: str
   :param reactor: The reactor to use for endpoint creation.
   :type reactor: obj

   :returns obj -- An instance implementing IStreamServerEndpoint
   """
    endpoint = None

    ## a TCP endpoint
    ##
    if config['type'] == 'tcp':

        ## the TCP protocol version (v4 or v6)
        ##
        version = int(config.get('version', 4))

        ## the listening port
        ##
        if type(config['port']) in (str, unicode):
            ## read port from environment variable ..
            try:
                port = int(os.environ[config['port'][1:]])
            except Exception as e:
                print(
                    "Could not read listening port from env var: {}".format(e))
                raise e
        else:
            port = config['port']

        ## the listening interface
        ##
        interface = str(config.get('interface', '').strip())

        ## the TCP accept queue depth
        ##
        backlog = int(config.get('backlog', 50))

        if 'tls' in config:

            if _HAS_TLS:
                key_filepath = os.path.abspath(
                    os.path.join(cbdir, config['tls']['key']))
                cert_filepath = os.path.abspath(
                    os.path.join(cbdir, config['tls']['certificate']))

                with open(key_filepath) as key_file:
                    with open(cert_filepath) as cert_file:

                        if 'dhparam' in config['tls']:
                            dhparam_filepath = os.path.abspath(
                                os.path.join(cbdir, config['tls']['dhparam']))
                        else:
                            dhparam_filepath = None

                        ## create a TLS context factory
                        ##
                        key = key_file.read()
                        cert = cert_file.read()
                        ciphers = config['tls'].get('ciphers', None)
                        ctx = TlsServerContextFactory(
                            key,
                            cert,
                            ciphers=ciphers,
                            dhParamFilename=dhparam_filepath)

                ## create a TLS server endpoint
                ##
                if version == 4:
                    endpoint = SSL4ServerEndpoint(reactor,
                                                  port,
                                                  ctx,
                                                  backlog=backlog,
                                                  interface=interface)
                elif version == 6:
                    raise Exception("TLS on IPv6 not implemented")
                else:
                    raise Exception(
                        "invalid TCP protocol version {}".format(version))

            else:
                raise Exception(
                    "TLS transport requested, but TLS packages not available")

        else:
            ## create a non-TLS server endpoint
            ##
            if version == 4:
                endpoint = TCP4ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            elif version == 6:
                endpoint = TCP6ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            else:
                raise Exception(
                    "invalid TCP protocol version {}".format(version))

    ## a Unix Domain Socket endpoint
    ##
    elif config['type'] == 'unix':

        ## the accept queue depth
        ##
        backlog = int(config.get('backlog', 50))

        ## the path
        ##
        path = os.path.abspath(os.path.join(cbdir, config['path']))

        ## create the endpoint
        ##
        endpoint = UNIXServerEndpoint(reactor, path, backlog=backlog)

    else:
        raise Exception("invalid endpoint type '{}'".format(config['type']))

    return endpoint
Code example #18
File: router.py Project: rogererens/crossbar
                ## a Unix Domain Socket endpoint
                ##
                elif endpoint_config["type"] == "unix":

                    ## the accept queue depth
                    ##
                    backlog = int(endpoint_config.get("backlog", 50))

                    ## the path
                    ##
                    path = os.path.abspath(os.path.join(self._cbdir, endpoint_config["path"]))

                    ## create the endpoint
                    ##
                    server = UNIXServerEndpoint(reactor, path, backlog=backlog)

                else:
                    raise ApplicationError(
                        "crossbar.error.invalid_configuration",
                        "invalid endpoint type '{}'".format(endpoint_config["type"]),
                    )

            except Exception as e:
                log.msg("endpoint creation failed: {}".format(e))
                raise e

            d = server.listen(transport_factory)

            def ok(port):
                router.transport_no += 1
Code example #19
File: ipc.py Project: laoyin/maas
class IPCMasterService(service.Service, object):
    """
    IPC master service.

    Provides the master side of the IPC communication between the workers.
    """

    connections = None

    def __init__(self, reactor, workers=None, socket_path=None):
        super(IPCMasterService, self).__init__()
        self.reactor = reactor
        self.workers = workers
        self.socket_path = socket_path
        if self.socket_path is None:
            self.socket_path = get_ipc_socket_path()
        if os.path.exists(self.socket_path):
            os.remove(self.socket_path)
        self.endpoint = UNIXServerEndpoint(reactor, self.socket_path)
        self.port = None
        self.connections = {}
        self.factory = Factory.forProtocol(IPCMaster)
        self.factory.service = self

    @asynchronous
    def startService(self):
        """Start listening on UNIX socket."""
        super(IPCMasterService, self).startService()
        self.starting = self.endpoint.listen(self.factory)

        def save_port(port):
            self.port = port

        def log_failure(failure):
            if failure.check(CancelledError):
                log.msg("IPCMasterService start-up has been cancelled.")
            else:
                log.err(failure, "IPCMasterService start-up failed.")

        self.starting.addCallback(save_port)
        self.starting.addErrback(log_failure)

        # Twisted's service framework does not track start-up progress, i.e.
        # it does not check for Deferreds returned by startService(). Here we
        # return a Deferred anyway so that direct callers (esp. those from
        # tests) can easily wait for start-up.
        return self.starting

    @asynchronous
    @inlineCallbacks
    def stopService(self):
        """Stop listening."""
        self.starting.cancel()
        if self.port:
            self.port, port = None, self.port
            yield port.stopListening()
        for conn in self.connections.values():
            try:
                yield conn.transport.loseConnection()
            except:
                log.err(None, "Failure when closing IPC connection.")
        yield super(IPCMasterService, self).stopService()

    def registerWorker(self, pid, conn):
        """Register the worker with `pid` using `conn`."""
        self.connections[pid] = conn
        log.msg("Worker pid:%d IPC connected." % pid)

    def getPIDFromConnection(self, conn):
        """Get the PID from the connection."""
        for pid, reg in self.connections.items():
            if reg == conn:
                return pid

    def unregisterWorker(self, conn, reason):
        """Unregister the worker with `pid` because of `reason`."""
        pid = self.getPIDFromConnection(conn)
        if pid:
            del self.connections[pid]
            log.msg("Worker pid:%d IPC disconnected." % pid)
            if self.workers:
                self.workers.killWorker(pid)
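
The master above only listens; the workers are expected to connect to the same socket path. A sketch of what that client side could look like in plain Twisted (this is not MAAS code; the protocol is a placeholder, and get_ipc_socket_path() is assumed to be importable as in the master):

from twisted.internet import reactor
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.internet.protocol import Factory, Protocol

class IPCWorker(Protocol):  # placeholder, not the real MAAS protocol
    def connectionMade(self):
        self.transport.write(b'hello from worker\n')

endpoint = UNIXClientEndpoint(reactor, get_ipc_socket_path())
d = endpoint.connect(Factory.forProtocol(IPCWorker))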
Code example #20
File: mailrfd.py Project: justinjereza/MailRF
            print e
            sys.exit(1)

        # Give the process its own session under init
        os.setsid()

        # Final fork
        try:
            pid = os.fork()
            if pid > 0:
                pid_file = open(working_dir + '/mailrfd.pid', 'w')
                pid_file.write(str(pid))
                pid_file.close()
                sys.exit()
        except OSError, e:
            print e
            sys.exit(1)

    if __debug__:
        log.startLogging(sys.stderr)
    else:
        syslog.startLogging(prefix='mailrfd', options=LOG_PID, facility=LOG_MAIL)

    if __debug__:
        endpoint = TCP4ServerEndpoint(reactor, 8027, interface='localhost')
    else:
        endpoint = UNIXServerEndpoint(reactor, working_dir + '/socket')

    endpoint.listen(MailRfFactory())
    reactor.run()
Code example #21
File: endpoints.py Project: rheenen/txdbus
def getDBusEndpoints(reactor, busAddress, client=True):
    """
    Creates DBus endpoints.

    @param busAddress: 'session', 'system', or a valid bus address as defined by
                       the DBus specification. If 'session' (the default) or 'system'
                       is supplied, the contents of the DBUS_SESSION_BUS_ADDRESS or
                       DBUS_SYSTEM_BUS_ADDRESS environment variables will be used for
                       the bus address, respectively. If DBUS_SYSTEM_BUS_ADDRESS is not
                       set, the well-known address unix:path=/var/run/dbus/system_bus_socket
                       will be used.
    @type busAddress: C{string}
    
    @rtype: C{list} of L{twisted.internet.interfaces.IStreamServerEndpoint}
    @returns: A list of endpoint instances
    """

    if busAddress == 'session':
        addrString = os.environ.get('DBUS_SESSION_BUS_ADDRESS', None)
        if addrString is None:
            raise Exception('DBus Session environment variable not set')

    elif busAddress == 'system':
        addrString = os.environ.get(
            'DBUS_SYSTEM_BUS_ADDRESS',
            'unix:path=/var/run/dbus/system_bus_socket')

    else:
        addrString = busAddress

    #XXX Add documentation about extra key=value parameters in address string
    #    such as nonce-tcp vs tcp which use same endpoint class
    epl = list()

    for ep_addr in addrString.split(';'):
        d = dict()
        kind = None
        ep = None

        for c in ep_addr.split(','):
            if c.startswith('unix:'):
                kind = 'unix'
                c = c[5:]
            elif c.startswith('tcp:'):
                kind = 'tcp'
                c = c[4:]
            elif c.startswith('nonce-tcp:'):
                kind = 'tcp'
                c = c[10:]
                d['nonce-tcp'] = True
            elif c.startswith('launchd:'):
                kind = 'launchd'
                c = c[7:]

            if '=' in c:
                k, v = c.split('=')
                d[k] = v

        if kind == 'unix':
            if 'path' in d:
                path = d['path']
            elif 'tmpdir' in d:
                path = d['tmpdir'] + '/dbus-' + str(os.getpid())
            elif 'abstract' in d:
                path = '\0' + d['abstract']

            if client:
                ep = UNIXClientEndpoint(reactor, path=path)
            else:
                ep = UNIXServerEndpoint(reactor, address=path)

        elif kind == 'tcp':
            if client:
                ep = TCP4ClientEndpoint(reactor, d['host'], int(d['port']))
            else:
                ep = TCP4ServerEndpoint(reactor,
                                        int(d['port']),
                                        interface=d['host'])

        if ep:
            ep.dbus_args = d
            epl.append(ep)

    return epl
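
A hedged usage sketch for the helper above: resolve the session bus address into client endpoints and connect to the first one. The bare Protocol is only a placeholder; a real client would use txdbus's own connection machinery:

from twisted.internet import reactor
from twisted.internet.protocol import Factory, Protocol

endpoints = getDBusEndpoints(reactor, 'session', client=True)
if endpoints:
    d = endpoints[0].connect(Factory.forProtocol(Protocol))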
Code example #22
class IPCMasterService(service.Service, object):
    """
    IPC master service.

    Provides the master side of the IPC communication between the workers.
    """

    UPDATE_INTERVAL = 60  # 60 seconds.

    REMOVE_INTERVAL = 90  # 90 seconds.

    connections = None

    def __init__(
            self, reactor, workers=None, socket_path=None):
        super(IPCMasterService, self).__init__()
        self.reactor = reactor
        self.workers = workers
        self.socket_path = socket_path
        if self.socket_path is None:
            self.socket_path = get_ipc_socket_path()
        if os.path.exists(self.socket_path):
            os.remove(self.socket_path)
        self.endpoint = UNIXServerEndpoint(reactor, self.socket_path)
        self.port = None
        self.connections = {}
        self.factory = Factory.forProtocol(IPCMaster)
        self.factory.service = self
        self.updateLoop = LoopingCall(self.update)

    @asynchronous
    def startService(self):
        """Start listening on UNIX socket and create the region controller."""
        super(IPCMasterService, self).startService()
        self.starting = self.endpoint.listen(self.factory)

        def save_port(port):
            self.port = port

        @transactional
        def create_region(result):
            RegionController.objects.get_or_create_running_controller()

        def start_update_loop(result):
            self.updateLoopDone = self.updateLoop.start(self.UPDATE_INTERVAL)

        def log_failure(failure):
            if failure.check(CancelledError):
                log.msg("IPCMasterService start-up has been cancelled.")
            else:
                log.err(failure, "IPCMasterService start-up failed.")

        self.starting.addCallback(save_port)
        self.starting.addCallback(partial(deferToDatabase, create_region))
        self.starting.addCallback(start_update_loop)
        self.starting.addErrback(log_failure)

        # Twisted's service framework does not track start-up progress, i.e.
        # it does not check for Deferreds returned by startService(). Here we
        # return a Deferred anyway so that direct callers (esp. those from
        # tests) can easily wait for start-up.
        return self.starting

    @asynchronous
    @inlineCallbacks
    def stopService(self):
        """Stop listening."""
        self.starting.cancel()
        if self.port:
            self.port, port = None, self.port
            yield port.stopListening()
        for data in self.connections.values():
            try:
                yield data['connection'].transport.loseConnection()
            except:
                log.err(None, "Failure when closing IPC connection.")

        @transactional
        def delete_all_processes():
            region = RegionController.objects.get_running_controller()
            region.processes.all().delete()

        @asynchronous
        def stop_update_loop():
            if self.updateLoop.running:
                self.updateLoop.stop()
                return self.updateLoopDone

        yield deferToDatabase(delete_all_processes)
        yield stop_update_loop()
        yield super(IPCMasterService, self).stopService()

    @asynchronous
    def registerWorker(self, pid, conn):
        """Register the worker with `pid` using `conn`."""

        @transactional
        def create_process(pid):
            region = RegionController.objects.get_running_controller()
            process, _ = RegionControllerProcess.objects.get_or_create(
                region=region, pid=pid)
            return (pid, process.id)

        def log_connected(result):
            pid, process_id = result
            log.msg("Worker pid:%d IPC connected." % pid)
            return result

        def add_to_connections(result):
            pid, process_id = result
            self.connections[pid] = {
                'process_id': process_id,
                'connection': conn,
                'rpc': {
                    'port': None,
                    'connections': set(),
                }
            }
            return process_id

        @transactional
        def update_service(process_id):
            region = RegionController.objects.get_running_controller()
            self._updateService(region)
            return process_id

        def return_result(process_id):
            return {
                'process_id': process_id,
            }

        d = deferToDatabase(create_process, pid)
        d.addCallback(log_connected)
        d.addCallback(add_to_connections)
        d.addCallback(partial(deferToDatabase, update_service))
        d.addCallback(return_result)
        return d

    def getPIDFromConnection(self, conn):
        """Get the PID from the connection."""
        for pid, data in self.connections.items():
            if data['connection'] == conn:
                return pid

    @asynchronous
    def unregisterWorker(self, conn, reason):
        """Unregister the worker with `pid` because of `reason`."""
        pid = self.getPIDFromConnection(conn)
        if pid:

            @transactional
            def delete_process(pid):
                process_id = self.connections[pid]['process_id']
                RegionControllerProcess.objects.filter(id=process_id).delete()
                return pid

            def remove_conn_kill_worker(pid):
                del self.connections[pid]
                if self.workers:
                    self.workers.killWorker(pid)
                return pid

            def log_disconnected(pid):
                log.msg("Worker pid:%d IPC disconnected." % pid)

            d = deferToDatabase(delete_process, pid)
            d.addCallback(remove_conn_kill_worker)
            d.addCallback(log_disconnected)
            return d

    def _getListenAddresses(self, port):
        """Return list of tuple (address, port) for the addresses the worker
        is listening on."""
        addresses = get_all_interface_source_addresses()
        if addresses:
            return set(
                (addr, port)
                for addr in addresses
            )
        # There are no non-loopback addresses, so return loopback
        # address as a fallback.
        loopback_addresses = set()
        for addr in get_all_interface_addresses():
            ipaddr = IPAddress(addr)
            if ipaddr.is_link_local():
                continue  # Don't advertise link-local addresses.
            if ipaddr.is_loopback():
                loopback_addresses.add((addr, port))
        return loopback_addresses

    @synchronous
    @transactional
    def _updateEndpoints(self, process, addresses):
        """Update the endpoints for `pid` and `port`."""
        previous_endpoint_ids = set(
            RegionControllerProcessEndpoint.objects.filter(
                process=process).values_list("id", flat=True))
        if addresses:
            for addr, port in addresses:
                endpoint, created = (
                    RegionControllerProcessEndpoint.objects.get_or_create(
                        process=process, address=addr, port=port))
                if not created:
                    previous_endpoint_ids.remove(endpoint.id)
        RegionControllerProcessEndpoint.objects.filter(
            id__in=previous_endpoint_ids).delete()

    @synchronous
    def _getProcessObjFor(self, pid):
        """Return `RegionControllerProcess` for `pid`."""
        process_id = self.connections[pid]['process_id']
        try:
            return RegionControllerProcess.objects.get(id=process_id)
        except RegionControllerProcess.DoesNotExist:
            region_obj = RegionController.objects.get_running_controller()
            return RegionControllerProcess.objects.create(
                id=process_id, region=region_obj, pid=pid)

    @asynchronous
    def registerWorkerRPC(self, pid, port):
        """Register the worker with `pid` has RPC `port` open."""
        if pid in self.connections:

            @transactional
            def create_endpoints(result):
                pid, port = result
                process = self._getProcessObjFor(pid)
                self._updateEndpoints(
                    process, self._getListenAddresses(port))
                return result

            def set_result(result):
                pid, port = result
                self.connections[pid]['rpc']['port'] = port
                self.connections[pid]['rpc']['connections'] = {}
                return result

            def log_rpc_open(result):
                log.msg(
                    "Worker pid:%d opened RPC listener on port:%s." % result)

            d = deferToDatabase(create_endpoints, (pid, port))
            d.addCallback(set_result)
            d.addCallback(log_rpc_open)
            return d

    @synchronous
    @transactional
    def _registerConnection(self, process, ident, host, port, force_save=True):
        rackd = RackController.objects.get(system_id=ident)
        endpoint, _ = RegionControllerProcessEndpoint.objects.get_or_create(
            process=process, address=host, port=port)
        connection, created = RegionRackRPCConnection.objects.get_or_create(
            endpoint=endpoint, rack_controller=rackd)
        if not created and force_save:
            # Force the save so that signals connected to the
            # RegionRackRPCConnection are performed.
            connection.save(force_update=True)
        return connection

    def registerWorkerRPCConnection(self, pid, connid, ident, host, port):
        """Register the worker with `pid` has RPC an RPC connection."""
        if pid in self.connections:

            @transactional
            def register_connection(pid, connid, ident, host, port):
                process = self._getProcessObjFor(pid)
                self._registerConnection(process, ident, host, port)
                return (pid, connid, ident, host, port)

            def log_connection(result):
                pid, conn = result[0], result[1:]
                log.msg(
                    "Worker pid:%d registered RPC connection to %s." % (
                        pid, conn[1:]))
                return conn

            def set_result(conn):
                connid, conn = conn[0], conn[1:]
                self.connections[pid]['rpc']['connections'][connid] = conn

            d = deferToDatabase(
                register_connection, pid, connid, ident, host, port)
            d.addCallback(log_connection)
            d.addCallback(set_result)
            return d

    @transactional
    def _unregisterConnection(self, process, ident, host, port):
        """Unregister the connection into the database."""
        try:
            endpoint = RegionControllerProcessEndpoint.objects.get(
                process=process, address=host, port=port)
        except RegionControllerProcessEndpoint.DoesNotExist:
            # Endpoint no longer exists, nothing to do.
            pass
        else:
            try:
                rackd = RackController.objects.get(system_id=ident)
            except RackController.DoesNotExist:
                # No rack controller, nothing to do.
                pass
            else:
                RegionRackRPCConnection.objects.filter(
                    endpoint=endpoint, rack_controller=rackd).delete()

    def unregisterWorkerRPCConnection(self, pid, connid):
        """Unregister connection for worker with `pid`."""
        if pid in self.connections:
            connections = self.connections[pid]['rpc']['connections']
            conn = connections.get(connid, None)
            if conn is not None:

                @transactional
                def unregister_connection(pid, connid, ident, host, port):
                    process = self._getProcessObjFor(pid)
                    self._unregisterConnection(process, ident, host, port)
                    return (pid, connid, ident, host, port)

                def log_disconnect(result):
                    pid, conn = result[0], result[1:]
                    log.msg(
                        "Worker pid:%d lost RPC connection to %s." % (
                            pid, conn[1:]))
                    return conn

                def set_result(conn):
                    connid = conn[0]
                    connections.pop(connid, None)

                d = deferToDatabase(
                    unregister_connection, pid, connid, *conn)
                d.addCallback(log_disconnect)
                d.addCallback(set_result)
                return d

    @synchronous
    def _updateConnections(self, process, connections):
        """Update the existing RPC connections into this region.

        This is needed because the database could get in an incorrect state
        because another process removed its references in the database and
        the existing connections need to be re-created.
        """
        if not connections:
            RegionRackRPCConnection.objects.filter(
                endpoint__process=process).delete()
        else:
            previous_connection_ids = set(
                RegionRackRPCConnection.objects.filter(
                    endpoint__process=process).values_list(
                    "id", flat=True))
            for _, (ident, host, port) in connections.items():
                db_conn = self._registerConnection(
                    process, ident, host, port, force_save=False)
                previous_connection_ids.discard(db_conn.id)
            if previous_connection_ids:
                RegionRackRPCConnection.objects.filter(
                    id__in=previous_connection_ids).delete()

    @synchronous
    def _updateService(self, region_obj):
        """Update the service status for this region."""
        Service.objects.create_services_for(region_obj)
        number_of_processes = len(self.connections)
        not_running_count = workers.MAX_WORKERS_COUNT - number_of_processes
        if not_running_count > 0:
            if number_of_processes == 1:
                process_text = "process"
            else:
                process_text = "processes"
            Service.objects.update_service_for(
                region_obj, "regiond", SERVICE_STATUS.DEGRADED,
                "%d %s running but %d were expected." % (
                    number_of_processes, process_text,
                    workers.MAX_WORKERS_COUNT))
        else:
            Service.objects.update_service_for(
                region_obj, "regiond", SERVICE_STATUS.RUNNING, "")

    @synchronous
    @transactional
    def _update(self):
        """Repopulate the database with process, endpoint, and connection
        information."""
        # Get the region controller and update its hostname and last
        # updated time.
        region_obj = RegionController.objects.get_running_controller()
        hostname = gethostname()
        if region_obj.hostname != hostname:
            region_obj.hostname = hostname
            region_obj.save()

        # Get all the existing processes for the region controller. This is
        # used to remove the old processes that we did not update.
        previous_process_ids = set(
            RegionControllerProcess.objects.filter(
                region=region_obj).values_list("id", flat=True))

        # Loop through all the current workers to update the records in the
        # database. Caution is needed because other region controllers can
        # remove expired processes.
        for pid, conn in self.connections.items():
            process = self._getProcessObjFor(pid)
            process.updated = now()
            process.save()
            if conn['rpc']['port']:
                # Update the endpoints for the provided port.
                self._updateEndpoints(
                    process, self._getListenAddresses(conn['rpc']['port']))
            else:
                # RPC is not running, no endpoints.
                self._updateEndpoints(process, [])
            self._updateConnections(process, conn['rpc']['connections'])
            previous_process_ids.discard(process.id)

        # Delete all the old processes that are dead.
        if previous_process_ids:
            RegionControllerProcess.objects.filter(
                id__in=previous_process_ids).delete()

        # Remove any old processes not owned by this controller. Every
        # controller should update its processes based on the `UPDATE_INTERVAL`;
        # any that are older than `REMOVE_INTERVAL` are dropped.
        remove_before_time = now() - timedelta(seconds=self.REMOVE_INTERVAL)
        RegionControllerProcess.objects.exclude(region=region_obj).filter(
            updated__lte=remove_before_time).delete()

        # Update the status of this regiond service for this region based on
        # the number of running processes.
        self._updateService(region_obj)

        # Update the status of all regions that have no processes running.
        for other_region in RegionController.objects.exclude(
                system_id=region_obj.id).prefetch_related("processes"):
            # Use len with `all` so the prefetch cache is used.
            if len(other_region.processes.all()) == 0:
                Service.objects.mark_dead(other_region, dead_region=True)

    @asynchronous
    def update(self):

        def ignore_cancel(failure):
            failure.trap(CancelledError)

        d = deferToDatabase(self._update)
        d.addErrback(ignore_cancel)
        d.addErrback(
            log.err, "Failed to update regiond's processes and endpoints; "
            "%s record's may be out of date" % (eventloop.loop.name,))
        return d
Code example #23
File: collect-master.py Project: CZ-NIC/ucollect
handler_syslog = logging.handlers.SysLogHandler(address=('localhost', 514))
handler_syslog.setFormatter(
    logging.Formatter(fmt=master_config.get('syslog_format')))
logging.getLogger().addHandler(handler_syslog)

loaded_plugins = {}
plugins = Plugins()
for (plugin, config) in master_config.plugins().items():
    (modulename, classname) = plugin.rsplit('.', 1)
    module = importlib.import_module(modulename)
    constructor = getattr(module, classname)
    loaded_plugins[plugin] = constructor(plugins, config)
    logging.info('Loaded plugin %s from %s', loaded_plugins[plugin].name(),
                 plugin)
# Some configuration, to load the port from?
endpoint = UNIXServerEndpoint(reactor, './collect-master.sock')

socat = None


class Socat(protocol.ProcessProtocol):
    def connectionMade(self):
        global socat
        socat = self.transport
        logging.info('Started proxy')

    def processEnded(self, status):
        global socat
        if socat:
            socat = None
            try:
Code example #24
class SnifferGateway(object):
    def __init__(self, sock_name):
        self.server = UNIXServerEndpoint(reactor, sock_name)
        self.server.listen(RequestFactory())
Code example #25
File: endpoint.py Project: wp4613/crossbar
def create_listening_endpoint_from_config(config, cbdir, reactor, log):
    """
    Create a Twisted stream server endpoint from a Crossbar.io transport configuration.

    See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamServerEndpoint.html

    :param config: The transport configuration.
    :type config: dict
    :param cbdir: Crossbar.io node directory (we need this for TLS key/certificates).
    :type cbdir: str
    :param reactor: The reactor to use for endpoint creation.
    :type reactor: obj

    :returns obj -- An instance implementing IStreamServerEndpoint
    """
    endpoint = None

    # a TCP endpoint
    #
    if config['type'] == 'tcp':

        # the TCP protocol version (v4 or v6)
        #
        version = int(config.get('version', 4))

        # the listening port
        if isinstance(config['port'], str):
            # read port from environment variable ..
            try:
                port = int(environ[config['port'][1:]])
            except Exception as e:
                log.warn("Could not read listening port from env var: {e}",
                         e=e)
                raise
        else:
            port = config['port']

        # the listening interface
        #
        interface = str(config.get('interface', '').strip())

        # the TCP accept queue depth
        #
        backlog = int(config.get('backlog', 50))

        if 'tls' in config:
            # create a TLS server endpoint
            #
            if _HAS_TLS:
                # TLS server context
                context = _create_tls_server_context(config['tls'], cbdir, log)

                if version == 4:
                    endpoint = SSL4ServerEndpoint(reactor,
                                                  port,
                                                  context,
                                                  backlog=backlog,
                                                  interface=interface)
                elif version == 6:
                    raise Exception("TLS on IPv6 not implemented")
                else:
                    raise Exception(
                        "invalid TCP protocol version {}".format(version))
            else:
                raise Exception(
                    "TLS transport requested, but TLS packages not available:\n{}"
                    .format(_LACKS_TLS_MSG))

        else:
            # create a non-TLS server endpoint
            #
            if version == 4:
                endpoint = TCP4ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            elif version == 6:
                endpoint = TCP6ServerEndpoint(reactor,
                                              port,
                                              backlog=backlog,
                                              interface=interface)
            else:
                raise Exception(
                    "invalid TCP protocol version {}".format(version))

    # a Unix Domain Socket endpoint
    #
    elif config['type'] == 'unix':

        # the accept queue depth
        #
        backlog = int(config.get('backlog', 50))

        # the path
        #
        path = FilePath(join(cbdir, os.path.expandvars(config['path'])))

        # if there is already something there, delete it.
        #
        if path.exists():
            log.info(("{path} exists, attempting to remove before using as a "
                      "UNIX socket"),
                     path=path)
            path.remove()

        # create the endpoint
        #
        endpoint = UNIXServerEndpoint(reactor, path.path, backlog=backlog)

    # twisted endpoint-string
    elif config['type'] == 'twisted':
        endpoint = serverFromString(reactor, config['server_string'])

    # tor endpoint
    elif config['type'] == 'onion':  # or "tor"? or "tor_onion"?
        port = config['port']
        private_key_fname = _ensure_absolute(config['private_key_file'], cbdir)
        tor_control_ep = create_connecting_endpoint_from_config(
            config['tor_control_endpoint'], cbdir, reactor, log)
        version = config.get('version', 3)  # default to modern version 3

        try:
            with open(private_key_fname, 'r') as f:
                private_key = f.read().strip()
            log.info(
                "Onion private key from '{private_key_fname}'",
                private_key_fname=private_key_fname,
            )
        except (IOError, OSError):
            private_key = None

        @implementer(IStreamServerEndpoint)
        class _EphemeralOnion(object):
            @defer.inlineCallbacks
            def listen(self, proto_factory):
                # we don't care which local TCP port we listen on, but
                # we do need to know it
                local_ep = TCP4ServerEndpoint(reactor,
                                              0,
                                              interface="127.0.0.1")
                target_port = yield local_ep.listen(proto_factory)
                tor = yield txtorcon.connect(
                    reactor,
                    tor_control_ep,
                )

                log.info(
                    "Creating onion service (descriptor upload can take 30s or more)"
                )
                hs = yield tor.create_onion_service(
                    ports=[
                        (port, target_port.getHost().port),
                    ],
                    private_key=private_key,
                    version=version,
                )

                # if it's new, store our private key
                # XXX better "if private_key is None"?
                if not exists(private_key_fname):
                    with open(private_key_fname, 'w') as f:
                        f.write(hs.private_key)
                    log.info("Wrote private key to '{fname}'",
                             fname=private_key_fname)

                log.info(
                    "Listening on Tor onion service {hs.hostname} "
                    " with ports: {ports}",
                    hs=hs,
                    ports=" ".join(hs.ports),
                )
                defer.returnValue(target_port)

        endpoint = _EphemeralOnion()

    else:
        raise Exception("invalid endpoint type '{}'".format(config['type']))

    return endpoint
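
A hedged usage sketch for the factory above; the config keys mirror the branches handled in the code ('type', 'port', 'backlog', 'path'), and cbdir, reactor and log are assumed to already be in scope:

tcp_config = {'type': 'tcp', 'port': 8080, 'backlog': 100}
unix_config = {'type': 'unix', 'path': 'node.sock'}

tcp_endpoint = create_listening_endpoint_from_config(tcp_config, cbdir, reactor, log)
unix_endpoint = create_listening_endpoint_from_config(unix_config, cbdir, reactor, log)
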
コード例 #26
0
ファイル: server.py プロジェクト: TurpIF/KHome
                WHERE time < ?  AND time > ? ORDER BY time DESC' \
                % (', '.join(fields), table_name)
        result = self.sql_query(query, (time_to, time_from), True)
        out = {'success': False}
        if result:
            out = {}
            out['success'] = True
            out['data'] = [{'time': r[0], 'fields': [{'name': n, 'value': v}
                    for n, v in zip(fields, r[1:])]} for r in result]
        out_json = json.dumps(out)
        print out_json
        self.transport.write(out_json)

class DBFactory(Factory):
    def __init__(self, db_filename):
        self.db_filename = db_filename

    def startFactory(self):
        self.db_conn = lite.connect(self.db_filename)

    def stopFactory(self):
        self.db_conn.close()

    def buildProtocol(self, addr):
        return Database(self.db_conn)

db_filename = 'scutu.db'
endpoint = UNIXServerEndpoint(reactor, './scutu.sock')
endpoint.listen(DBFactory(db_filename))
reactor.run()
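
A possible client-side counterpart, assuming the server above is listening on ./scutu.sock; the request payload is hypothetical, since the real query format is project-specific:

from twisted.internet import reactor
from twisted.internet.endpoints import UNIXClientEndpoint, connectProtocol
from twisted.internet.protocol import Protocol


class PrintReply(Protocol):
    def connectionMade(self):
        # Illustrative request only; the Database protocol defines the real format.
        self.transport.write(b'{"request": "history"}')

    def dataReceived(self, data):
        print(data)
        reactor.stop()


connectProtocol(UNIXClientEndpoint(reactor, './scutu.sock'), PrintReply())
reactor.run()
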
コード例 #27
0
                index = 0

            except ValueError:
                pass  # Not the matching brace (nested JSON)

            index = self.current_buffer.find('}', index + 1)

        pass

    pass


class GProxyFactory(Factory):
    protocol = GProxyProtocol

    def __init__(self, remote):
        self.remote = remote
        pass


endpoint = TCP4ServerEndpoint(reactor, 1863)
gfactory = GreenhubFactory()
endpoint.listen(gfactory)

localendpoint = UNIXServerEndpoint(reactor, "/tmp/greenhub.sock")
localendpoint.listen(GProxyFactory(gfactory))

debug("INFO", "Serveur démarré.")
reactor.run()
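
A quick smoke test of the local proxy socket above can be done with plain stdlib sockets; the message body is hypothetical, as the real protocol is project-specific:

import socket

client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect("/tmp/greenhub.sock")
client.sendall(b'{"ping": true}')  # illustrative payload only
print(client.recv(4096))
client.close()
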
コード例 #28
0
    def buildProtocol(self, addr):
        return TraphProtocol(self.traph)

    def close(self):
        self.traph.close()


if __name__ == "__main__":
    sock = sys.argv[1]
    corpus = sys.argv[2]
    try:
        with open(sock + "-options.json") as f:
            options = json.load(f)
    except (IOError, ValueError):  # missing or malformed options file
        options = {}
    traph = TraphServerFactory(corpus, **options)
    endpoint = UNIXServerEndpoint(reactor, sock)
    server_listening_deferred = endpoint.listen(traph)

    @server_listening_deferred.addErrback
    def server_listening_failed(failure):
        print failure.value
        reactor.stop()

    @server_listening_deferred.addCallback
    def server_listen_callback(twisted_port):
        traph.ready()

    reactor.run()
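
The decorator form above is simply a way of attaching handlers to the Deferred returned by listen(); the same intent expressed with addCallbacks, which also ensures traph.ready() only runs when listening actually succeeded:

def server_listening_failed(failure):
    # Binding the UNIX socket failed (e.g. the path is already in use).
    print(failure.value)
    reactor.stop()


def server_listen_callback(listening_port):
    # The socket is bound; tell the factory it can start serving.
    traph.ready()


server_listening_deferred.addCallbacks(server_listen_callback, server_listening_failed)
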