def __init__(self, server, sock, address):
    """Set up state for one inbound client connection.

    server  -- owning server object (supplies .config)
    sock    -- the connected client socket
    address -- (ip, port) tuple of the remote peer
    """
    self._rip, self._rport = address
    self._lip, self._lport = sock.getsockname()
    self._server = server
    self._config = server.config
    self._socket = sock
    self._file = sock.makefile()
    # Reverse-resolve both endpoints; getfqdn() may block on DNS.
    self._rhost = socket.getfqdn(self._rip)
    self._lhost = socket.getfqdn(self._lip)
    self._timeout = Timeout(30, error.TimeoutError)
    self._hello = None
    self._hello_host = ''
    self._relay_client = False
    self._connected = True
    self._transaction = None

    # Generate a unique identifier for this connection.
    # NOTE(review): sha1()/update() with str arguments implies Python 2;
    # Python 3 would require bytes here - confirm target interpreter.
    sha_hash = hashlib.sha1(self._rip)
    sha_hash.update(str(time.time()))
    sha_hash.update(str(random.getrandbits(64)))
    self._cid = sha_hash.hexdigest()
    log.connection_id = self._cid[:7]

    # Add all the command controller methods (marked via _is_command).
    # Dict comprehension replaces the dict([...]) list-building idiom.
    self._commands = {c: getattr(self, c) for c in dir(self)
                      if getattr(getattr(self, c), '_is_command', False)}
def __init__(self, server, conn, addr, data_size_limit=1024000):
    """Initialize one SMTP channel for a client connection.

    server          -- owning SMTP server
    conn            -- accepted client socket
    addr            -- remote address tuple
    data_size_limit -- maximum accepted DATA size in bytes
    """
    self.server = server
    self.conn = conn
    self.addr = addr
    self.line = []
    self.state = self.COMMAND
    self.seen_greeting = 0
    self.mailfrom = None
    self.rcpttos = []
    self.data = ""
    self.fqdn = socket.getfqdn()
    self.ac_in_buffer_size = 4096
    self.ac_in_buffer = ""
    self.closed = False
    self.data_size_limit = data_size_limit  # in byte
    self.current_size = 0
    self.tls = False
    try:
        self.peer = conn.getpeername()
    except socket.error as err:
        # a race condition may occur if the other end is closing
        # before we can get the peername
        logger.error(err)
        self.conn.close()
        # BUG FIX: socket.error instances are not indexable on Python 3;
        # use the .errno attribute (also works on Python 2.6+).
        if err.errno != errno.ENOTCONN:
            raise
        return
    self.push("220 %s GSMTPD at your service" % self.fqdn)
    self.terminator = "\r\n"
    logger.debug("SMTP channel initialized")
def run_job(self, fqdn, cmd, args):
    """Spawn *cmd* with *args* as a background greenlet on this node.

    Raises Unavailable when *fqdn* does not name this host.
    Returns the generated job id (a UUID4 string).
    """
    if fqdn != socket.getfqdn():
        raise Unavailable()
    # str() is the idiomatic spelling of uuid4().__str__()
    jid = str(uuid.uuid4())
    self._jobs[jid] = gevent.spawn(lambda: run_job_thread(self, jid, cmd, args))
    return jid
def connect(self, host=None, port=None, timeout=None):
    """Connect to (host, port), reusing the current connection when it
    already points at the requested endpoint.

    host/port/timeout default to the values stored on the instance.
    Raises CommandTimeout when the TCP connect exceeds the timeout.
    Returns True once connected.
    """
    local_host = host or self._host
    local_port = port or self._port
    local_timeout = timeout if timeout is not None else self._timeout
    if self._connected:
        # BUG FIX: the previous peer must be read from our own socket
        # (self._fd); `s` was an undefined name.
        prev_ip_host, prev_port = self._fd.getpeername()
        try:
            prev_name, aliaslist, _ = socket.gethostbyaddr(prev_ip_host)
        except socket.herror:
            prev_name = prev_ip_host
            # BUG FIX: aliaslist was left undefined on this path and the
            # comparison below would raise NameError.
            aliaslist = []
        fqdn_host = socket.getfqdn(host)
        # BUG FIX: `name` was undefined; the intent is to compare the
        # requested FQDN against the previous peer's name and aliases.
        if (port != prev_port or
                (fqdn_host != prev_name and prev_name not in aliaslist)):
            self.close()
    with self._lock:
        if self._connected:
            return True
        self._fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        err_msg = "timeout on command(%s, %d)" % (local_host, local_port)
        with gevent.Timeout(local_timeout, CommandTimeout(err_msg)):
            self._fd.connect((local_host, local_port))
        # Low-latency settings: disable Nagle, request minimum-delay TOS.
        self._fd.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self._fd.setsockopt(socket.SOL_IP, socket.IP_TOS, 0x10)
        self._host = local_host
        self._port = local_port
        self._raw_read_task = gevent.spawn(self._raw_read, weakref.proxy(self), self._fd)
        self._connected = True
        return True
def __init__(self, server, conn, addr, data_size_limit=1024000):
    """Initialize one SMTP channel for a client connection.

    server          -- owning SMTP server
    conn            -- accepted client socket
    addr            -- remote address tuple
    data_size_limit -- maximum accepted DATA size in bytes
    """
    self.server = server
    self.conn = conn
    self.addr = addr
    self.line = []
    self.state = self.COMMAND
    self.seen_greeting = 0
    self.mailfrom = None
    self.rcpttos = []
    self.data = ''
    self.fqdn = socket.getfqdn()
    self.ac_in_buffer_size = 4096
    self.ac_in_buffer = ''
    self.closed = False
    self.data_size_limit = data_size_limit  # in byte
    self.current_size = 0
    self.tls = False
    try:
        self.peer = conn.getpeername()
    except socket.error as err:
        # a race condition may occur if the other end is closing
        # before we can get the peername
        logger.error(err)
        self.conn.close()
        # BUG FIX: socket.error instances are not indexable on Python 3;
        # use the .errno attribute (also works on Python 2.6+).
        if err.errno != errno.ENOTCONN:
            raise
        return
    self.push('220 %s GSMTPD at your service' % self.fqdn)
    self.terminator = '\r\n'
    logger.debug('SMTP channel initialized')
def HAVE_DATA(self, reply, data, err):
    # Finalize receipt of a message: record client metadata on the
    # envelope, hand the message off, and translate the handoff result
    # into an SMTP reply code/message on *reply*.
    if isinstance(err, MessageTooBig):
        reply.code = '552'
        reply.message = '5.3.4 Message exceeded size limit'
        return
    elif err:
        # Any other receive error is fatal for this transaction.
        raise err
    # Stamp the envelope with what we know about the connected client.
    self.envelope.client['ip'] = self.address[0]
    self.envelope.client['host'] = self.reverse_address
    self.envelope.client['name'] = self.ehlo_as
    self.envelope.client['protocol'] = self.protocol
    # Abort any in-flight reverse-DNS (PTR) lookup; its result is no
    # longer needed now that the message is complete.
    if hasattr(self, '_ptr_lookup_thread'):
        self._ptr_lookup_thread.kill(block=False)
    self.envelope.receiver = getfqdn()
    self.envelope.timestamp = time.time()
    self.envelope.parse(data)
    results = self.handoff(self.envelope)
    # Only the first result is inspected; its second element carries the
    # per-message outcome (None on success, or an error instance).
    if isinstance(results[0][1], QueueError):
        reply.code = '550'
        reply.message = '5.6.0 Error queuing message'
    elif isinstance(results[0][1], RelayError):
        relay_reply = results[0][1].reply
        reply.copy(relay_reply)
    else:
        reply.message = '2.6.0 Message accepted for delivery'
    # Transaction finished - drop the envelope reference.
    self.envelope = None
def __init__(self, url, pool_size=None, tls=None, ehlo_as=None,
             timeout=None, idle_timeout=None):
    """Create an HTTP relay endpoint.

    The EHLO identity defaults to this host's fully-qualified name
    when *ehlo_as* is not given.
    """
    super(HttpRelay, self).__init__(pool_size)
    self.url = url
    self.tls = tls
    self.timeout = timeout
    self.idle_timeout = idle_timeout
    self.ehlo_as = ehlo_as if ehlo_as else socket.getfqdn()
def run_job(self, fqdn, cmd, args):
    """Spawn *cmd* with *args* as a background greenlet on this node.

    Raises Unavailable when *fqdn* does not name this host.
    Returns the generated job id (a UUID4 string).
    """
    if fqdn != socket.getfqdn():
        raise Unavailable()
    # str() is the idiomatic spelling of uuid4().__str__()
    jid = str(uuid.uuid4())
    self._jobs[jid] = gevent.spawn(
        lambda: run_job_thread(self, jid, cmd, args))
    return jid
def __init__(self):
    """Initialize subscription state, cache host identity and register."""
    self.subscribed = 0
    self._generator = None
    self._events = Queue()
    # Resolve host identity once up front.
    self.fqdn = socket.getfqdn()
    self.hostname = socket.gethostname()
    self.register()
def get_fqdn_by_ip(ip_address, default, log_msg_prefix):
    # type: (str, str, str) -> str
    """Return the FQDN for *ip_address*, or a recognizable placeholder
    containing *default* when the reverse lookup fails.

    BUG FIX: the type comment previously declared only two parameters.
    """
    try:
        host = socket.gethostbyaddr(ip_address)[0]
        return socket.getfqdn(host)
    except Exception:
        # Best-effort lookup: log the failure and fall back.
        # logger.warn is a deprecated alias - use warning().
        logger.warning('%s exception in FQDN lookup `%s`', log_msg_prefix, format_exc())
        return '(unknown-{}-fqdn)'.format(default)
def update_environ(self):
    """Fill in SERVER_NAME / SERVER_PORT in the WSGI environ from the
    server's listening address."""
    addr = self.address
    if not isinstance(addr, tuple):
        # Non-TCP listener (e.g. a unix socket path): no name or port.
        self.environ.setdefault('SERVER_NAME', '')
        self.environ.setdefault('SERVER_PORT', '')
        return
    if 'SERVER_NAME' not in self.environ:
        try:
            resolved = socket.getfqdn(addr[0])
        except socket.error:
            # DNS failure: fall back to the raw address.
            resolved = str(addr[0])
        self.environ['SERVER_NAME'] = resolved
    self.environ.setdefault('SERVER_PORT', str(addr[1]))
def main(options):
    """Build and return a pywsgi.WSGIServer configured from *options*.

    Exits the process when BUILD_SCRIPT does not point to an existing file.
    """
    # check config:
    if not os.path.exists(options['BUILD_SCRIPT']):
        sys.exit("BUILD_SCRIPT does not exist")
    # Renamed from `format` to avoid shadowing the builtin.
    log_format = '%(levelname)-8s %(name)s: %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    builder = Builder(logging.getLogger('builder'), options['BUILD_SCRIPT'])
    environ = {'SERVER_NAME': options.get('SERVER_NAME', socket.getfqdn()),
               'SERVER_PORT': options['PORT']}
    app = API(logging.getLogger('api'), environ, options['IMAGE_DIR'], builder)
    # Convert once; used for both logging and binding.
    port = int(options['PORT'])
    logging.info("Start serving requests on port %d" % (port,))
    return pywsgi.WSGIServer(('', port), app)
def run_job_thread(generator, jid, cmd, args):
    """Run *cmd* with *args* and report its outcome through *generator*.

    Always calls generator.complete(); a failed command is reported with
    success=False and the formatted traceback as the return value.
    """
    success = True
    try:
        result = run_job(cmd, args)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed BaseException subclasses
        # such as gevent.GreenletExit and KeyboardInterrupt.
        success = False
        result = traceback.format_exc()
    result = MsgEvent(JOB, {
        'id': socket.getfqdn(),
        'jid': jid,
        'success': success,
        'return': result,
        'fun': cmd,
        'fun_args': args,
    })
    generator.complete(jid, result)
def run_job_thread(generator, jid, cmd, args):
    """Run *cmd* with *args* and report its outcome through *generator*.

    Always calls generator.complete(); a failed command is reported with
    success=False and the formatted traceback as the return value.
    """
    success = True
    try:
        result = run_job(cmd, args)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed BaseException subclasses
        # such as gevent.GreenletExit and KeyboardInterrupt.
        success = False
        result = traceback.format_exc()
    result = MsgEvent(
        JOB, {
            'id': socket.getfqdn(),
            'jid': jid,
            'success': success,
            'return': result,
            'fun': cmd,
            'fun_args': args,
        })
    generator.complete(jid, result)
def update_environ(self):
    """
    Called before the first request is handled to fill in WSGI environment values.

    This includes getting the correct server name and port.
    """
    addr = self.address
    if not isinstance(addr, tuple):
        # Non-TCP listener (e.g. a unix socket path): no name or port.
        self.environ.setdefault('SERVER_NAME', '')
        self.environ.setdefault('SERVER_PORT', '')
        return
    if 'SERVER_NAME' not in self.environ:
        try:
            resolved = socket.getfqdn(addr[0])
        except socket.error:
            resolved = str(addr[0])
        # On Python 3 the name must be text, not bytes.
        if PY3 and not isinstance(resolved, str):
            resolved = resolved.decode('ascii')
        self.environ['SERVER_NAME'] = resolved
    self.environ.setdefault('SERVER_PORT', str(addr[1]))
def __init__(self, server, conn, addr):
    # Per-connection SMTP channel state. Python 2 code - note the
    # legacy `except ..., err` syntax below.
    self.__datastate = False
    self.__close = False
    self.__server = server
    self.__conn = conn
    self.__file = conn.makefile()
    self.__addr = addr
    self.__greeting = 0
    self.__mailfrom = None
    self.__rcpttos = []
    self.__fqdn = socket.getfqdn()
    try:
        self.__peer = conn.getpeername()
    except socket.error, err:
        # a race condition may occur if the other end is closing
        # before we can get the peername
        self.close()
        # ENOTCONN is the expected race outcome; anything else re-raises.
        if err[0] != errno.ENOTCONN:
            raise
        return
def __init__(self, host='', port=0, local_hostname=None,
             timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
    """Initialize a new instance.

    If specified, `host' is the name of the remote host to which to
    connect.  If specified, `port' specifies the port to which to connect.
    By default, smtplib.SMTP_PORT is used.  If a host is specified the
    connect method is called, and if it returns anything other than a
    success code an SMTPConnectError is raised.  If specified,
    `local_hostname` is used as the FQDN of the local host for the
    HELO/EHLO command.  Otherwise, the local hostname is found using
    socket.getfqdn().
    """
    self.timeout = timeout
    self.esmtp_features = {}
    if host:
        (code, msg) = self.connect(host, port)
        if code != 220:
            raise SMTPConnectError(code, msg)
    if local_hostname is not None:
        self.local_hostname = local_hostname
    else:
        # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
        # if that can't be calculated, that we should use a domain literal
        # instead (essentially an encoded IP address like [A.B.C.D]).
        fqdn = socket.getfqdn()
        if '.' in fqdn:
            self.local_hostname = fqdn
        else:
            # We can't find an fqdn hostname, so use a domain literal
            addr = '127.0.0.1'
            try:
                addr = socket.gethostbyname(socket.gethostname())
            except socket.gaierror:
                # Hostname does not resolve - keep the loopback literal.
                pass
            self.local_hostname = '[%s]' % addr
def __init__(self, server, socket_, address):
    """Handle one inbound SMTP relay connection synchronously.

    Sends a greeting banner, then reads commands line-by-line and
    dispatches them until the peer disconnects or a handler clears
    self.run. Closes the socket on exit.
    """
    self.name = socket.getfqdn()
    self.server = server
    self._log = server._log
    self.peer = address
    self.stream = socket_.makefile()
    self.stream.write("%s SMTP RELAY\r\n" % self.name)
    self.stream.flush()
    self.from_ = None
    self.to = []
    self.run = True
    while self.run:
        line = self.stream.readline()
        # BUG FIX: readline() returns '' on EOF; without this check the
        # loop raised IndexError on parts[0] once the peer hung up.
        if not line:
            break
        parts = line.split(None, 1)
        if not parts:
            # Blank line - nothing to dispatch.
            continue
        self._log.debug("<<<" + line)
        method = self.lookupMethod(parts[0]) or self.do_UNKNOWN
        method(parts.pop().split(':', 1).pop().rstrip('\r\n'))
    socket_.close()
def __init__(self, config):
    """Initialize the actor; the domain defaults to this host's FQDN."""
    node.Actor.__init__(self, config)
    default_domain = socket.getfqdn()
    self.domain_name = config.get('domain', default_domain)
# GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, write to: # The Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor # Boston, MA 02110-1301, USA. # from gevent import socket from cStringIO import StringIO from vsmtpd.connection import command from vsmtpd.connection import Connection from vsmtpd.tests.common import TestCase localhost = socket.getfqdn('127.0.0.1') class Socket(object): def getsockname(self): return ('127.0.0.1', 25) def makefile(self): return StringIO() class Server(object): config = {} class ConnectionTestCase(TestCase):
def fakefqdn(self, name):
    """Resolve *name* via socket.getfqdn(), memoizing the first result.

    Subsequent calls return the cached value regardless of *name*.
    """
    if not self.fqdn_cache:
        # First call: perform the (potentially slow) lookup once.
        self.fqdn_cache = socket.getfqdn(name)
    return self.fqdn_cache
    # NOTE(review): this chunk begins inside a try: whose opening line is
    # not visible here; the `except ImportError` below pairs with it.
    if sys.argv[1].startswith('main'):
        from main import app
    elif sys.argv[1].startswith('chat'):
        from chat import app
except ImportError:
    sys.exit("Usage: python run_server.py (main|chat) [--debug]")

if '--debug' in sys.argv:
    app.debug = True

socket_path = '/tmp/'+sys.argv[1]+'.sock'

# Delete the socket file if it already exists.
try:
    os.remove(socket_path)
except OSError:
    # Socket file was not present - nothing to clean up.
    pass

# Bind a non-blocking unix-domain listener for the WSGI server.
sock = socket(AF_UNIX)
sock.bind(socket_path)
sock.setblocking(0)
sock.listen(256)
# 0777 is a Python 2 octal literal: world-accessible socket file.
os.chmod(socket_path, 0777)

http_server = WSGIServer(sock, app)
# yeah this is a hack.
http_server.environ['SERVER_NAME'] = getfqdn()
http_server.serve_forever()
def main():
    """Run the daemon from the command line"""
    opts = parse_arguments(sys.argv[1:])

    # Initialize logger
    lvl = logging.DEBUG if opts.debug else logging.INFO
    global logger
    logger = logging.getLogger("vncauthproxy")
    logger.setLevel(lvl)
    formatter = logging.Formatter(("%(asctime)s %(module)s[%(process)d] "
                                   " %(levelname)s: %(message)s"),
                                  "%Y-%m-%d %H:%M:%S")
    handler = logging.FileHandler(opts.log_file)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    try:
        # Create pidfile
        pidf = pidlockfile.TimeoutPIDLockFile(opts.pid_file, 10)
        # Init ephemeral port pool
        ports = range(opts.min_port, opts.max_port + 1)

        # Init VncAuthProxy class attributes
        VncAuthProxy.server_timeout = opts.server_timeout
        VncAuthProxy.connect_retries = opts.connect_retries
        VncAuthProxy.retry_wait = opts.retry_wait
        VncAuthProxy.connect_timeout = opts.connect_timeout
        VncAuthProxy.ports = ports
        VncAuthProxy.authdb = parse_auth_file(opts.auth_file)
        VncAuthProxy.keyfile = opts.key_file
        VncAuthProxy.certfile = opts.cert_file
        VncAuthProxy.proxy_address = opts.proxy_listen_address
        # Resolved once here; shared by all proxy instances.
        VncAuthProxy.fqdn = socket.getfqdn()

        sockets = get_listening_sockets(opts.listen_port, opts.listen_address,
                                        reuse_addr=True)

        # Identity wrapper unless SSL is enabled below.
        wrap_ssl = lambda sock: sock
        if opts.enable_ssl:
            ssl_prot = ssl.PROTOCOL_TLSv1
            wrap_ssl = lambda sock: ssl.wrap_socket(sock, server_side=True,
                                                    keyfile=opts.key_file,
                                                    certfile=opts.cert_file,
                                                    ssl_version=ssl_prot)

        # Become a daemon:
        # Redirect stdout and stderr to handler.stream to catch
        # early errors in the daemonization process [e.g., pidfile creation]
        # which will otherwise go to /dev/null.
        daemon_context = AllFilesDaemonContext(
            pidfile=pidf,
            umask=0022,
            stdout=handler.stream,
            stderr=handler.stream,
            files_preserve=[handler.stream])

        # Remove any stale PID files, left behind by previous invocations
        if daemon.runner.is_pidfile_stale(pidf):
            logger.warning("Removing stale PID lock file %s", pidf.path)
            pidf.break_lock()

        try:
            daemon_context.open()
        except (AlreadyLocked, LockTimeout):
            raise InternalError(("Failed to lock PID file %s, another "
                                 "instance running?"), pidf.path)
        logger.info("Became a daemon")

        # A fork() has occured while daemonizing,
        # we *must* reinit gevent
        gevent.reinit()

        # Catch signals to ensure graceful shutdown,
        #
        # Uses gevent.signal so the handler fires even during
        # gevent.socket.accept()
        gevent.signal(SIGINT, fatal_signal_handler, "SIGINT")
        gevent.signal(SIGTERM, fatal_signal_handler, "SIGTERM")
    except InternalError as err:
        logger.critical(err)
        sys.exit(1)
    except Exception as err:
        logger.critical("Unexpected error:")
        logger.exception(err)
        sys.exit(1)

    # Accept loop: SystemExit (from the signal handlers) breaks us out;
    # any other error is logged and the loop continues.
    while True:
        try:
            client = None
            rlist, _, _ = select(sockets, [], [])
            for ctrl in rlist:
                client, _ = ctrl.accept()
                client = wrap_ssl(client)
                logger.info("New control connection")
                VncAuthProxy.spawn(logger, client)
            continue
        except Exception as err:
            logger.error("Unexpected error:")
            logger.exception(err)
            if client:
                client.close()
            continue
        except SystemExit:
            break

    # Graceful shutdown: close listeners and the daemon context.
    try:
        logger.info("Closing control sockets")
        while sockets:
            sock = sockets.pop()
            sock.close()
        daemon_context.close()
        sys.exit(0)
    except Exception as err:
        logger.critical("Unexpected error:")
        logger.exception(err)
        sys.exit(1)
def _init(self):
    """One-time initialization of this WebSocket wrapper's state."""
    # Python-level ID contains all the core details, our own ID and that of the thread (greenlet) that creates us
    _current_thread = current_thread()
    python_id = '{}.{}.{}'.format(hex(id(self)), _current_thread.name, hex(_current_thread.ident))

    # Assign core attributes to this object before calling parent class
    self.python_id = python_id

    # Must be set here and then to True later on because our parent class may already want
    # to accept connections, and we need to postpone their processing until we are initialized fully.
    self._initialized = False

    self.has_session_opened = False
    self._token = None
    self.update_lock = RLock()
    self.ext_client_id = None
    self.ext_client_name = None
    self.connection_time = self.last_seen = datetime.utcnow()
    self.sec_type = self.config.sec_type
    self.pings_missed = 0
    self.pings_missed_threshold = self.config.get('pings_missed_threshold', 5)
    # Last time we received a ping response (pong) from our peer
    self.ping_last_response_time = None
    self.user_data = Bunch()  # Arbitrary user-defined data
    self._disconnect_requested = False  # Have we been asked to disconnect this client?

    # Manages access to service hooks
    if self.config.hook_service:

        self.hook_tool = HookTool(self.config.parallel_server, HookCtx, hook_type_to_method, self.invoke_service)

        self.on_connected_service_invoker = self.hook_tool.get_hook_service_invoker(
            self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_CONNECTED)

        self.on_disconnected_service_invoker = self.hook_tool.get_hook_service_invoker(
            self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_DISCONNECTED)

        self.on_pubsub_response_service_invoker = self.hook_tool.get_hook_service_invoker(
            self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_PUBSUB_RESPONSE)

    else:
        self.hook_tool = None
        self.on_connected_service_invoker = None
        self.on_disconnected_service_invoker = None
        self.on_pubsub_response_service_invoker = None

    # For publish/subscribe over WSX
    self.pubsub_tool = PubSubTool(self.config.parallel_server.worker_store.pubsub, self,
        PUBSUB.ENDPOINT_TYPE.WEB_SOCKETS.id, self.deliver_pubsub_msg)

    # Active WebSocket client ID (WebSocketClient model, web_socket_client.id in SQL)
    self._sql_ws_client_id = None

    # For tokens assigned externally independent of our WS-level self.token.
    # Such tokens will be generated by Vault, for instance.
    self.ext_token = None

    # Drop WSGI keys pointing to complex Python objects such as sockets
    for name in _wsgi_drop_keys:
        self.initial_http_wsgi_environ.pop(name, None)

    # Responses to previously sent requests - keyed by request IDs
    self.responses_received = {}

    # Pre-format both endpoints as 'ip:port' strings for logging.
    _local_address = self.sock.getsockname()
    self._local_address = '{}:{}'.format(_local_address[0], _local_address[1])

    _peer_address = self.sock.getpeername()
    self._peer_address = '{}:{}'.format(_peer_address[0], _peer_address[1])

    self.forwarded_for = self.initial_http_wsgi_environ.get('HTTP_X_FORWARDED_FOR')

    if self.forwarded_for:
        self.forwarded_for_fqdn = socket.getfqdn(self.forwarded_for)
    else:
        self.forwarded_for_fqdn = WEB_SOCKET.DEFAULT.FQDN_UNKNOWN

    # Reverse-DNS of the peer is best-effort; failures are only logged.
    _peer_fqdn = WEB_SOCKET.DEFAULT.FQDN_UNKNOWN

    try:
        self._peer_host = socket.gethostbyaddr(_peer_address[0])[0]
        _peer_fqdn = socket.getfqdn(self._peer_host)
    except Exception:
        logger.warn(format_exc())
    finally:
        self._peer_fqdn = _peer_fqdn

    self.peer_conn_info_pretty = self.get_peer_info_pretty()

    # Message parser selected once, based on the configured data format.
    self._parse_func = {
        DATA_FORMAT.JSON: self.parse_json,
        DATA_FORMAT.XML: self.parse_xml,
    }[self.config.data_format]

    # All set, we can process connections now
    self._initialized = True
def __init__(self, hostname=None):
    """Store *hostname*, defaulting to this machine's FQDN."""
    if hostname:
        self.hostname = hostname
    else:
        self.hostname = getfqdn()
def __init__(self, queue, hostname=None):
    """Create the edge service, remembering its queue and identity.

    The advertised hostname defaults to this machine's FQDN.
    """
    super(Edge, self).__init__()
    self.queue = queue
    self.hostname = hostname if hostname else getfqdn()
import gevent
from gevent import Timeout, Greenlet
from gevent.socket import create_connection, getfqdn
from gevent.socket import error as socket_error

from slimta.smtp import SmtpError
from slimta.smtp.reply import Reply
from slimta.smtp.client import Client
from slimta import logging

from ..pool import RelayPoolClient
from . import SmtpRelayError

__all__ = ['SmtpRelayClient']

log = logging.getSocketLogger(__name__)

# Resolved once at import time; used as the default EHLO identity.
hostname = getfqdn()


class SmtpRelayClient(RelayPoolClient):
    # NOTE(review): the class body appears to continue beyond this chunk.

    def __init__(self, address, queue, socket_creator=None, ehlo_as=None,
                 tls=None, tls_immediately=False, tls_required=False,
                 tls_wrapper=None, connect_timeout=10.0, command_timeout=10.0,
                 data_timeout=None, idle_timeout=None, credentials=None):
        super(SmtpRelayClient, self).__init__(queue, idle_timeout)
        self.address = address
        # Only override the inherited socket creator when one is given.
        if socket_creator:
            self._socket_creator = socket_creator
        self.socket = None
import gevent
from gevent import Timeout, Greenlet
from gevent.socket import create_connection, getfqdn
from gevent.socket import error as socket_error

from slimta.smtp import SmtpError
from slimta.smtp.reply import Reply
from slimta.smtp.client import Client
from slimta import logging

from ..pool import RelayPoolClient
from . import SmtpRelayError

__all__ = ['SmtpRelayClient']

log = logging.getSocketLogger(__name__)

# Resolved once at import time; used as the default EHLO identity.
hostname = getfqdn()


class SmtpRelayClient(RelayPoolClient):

    # NOTE(review): this chunk is cut off mid-signature below; the remaining
    # parameters and the method body are outside this view.
    def __init__(self, address, queue, socket_creator=None, ehlo_as=None,
                 tls=None, tls_immediately=False, tls_required=False,
                 tls_wrapper=None, connect_timeout=10.0, command_timeout=10.0,
                 data_timeout=None,
def main():
    """Run the daemon from the command line"""
    opts = parse_arguments(sys.argv[1:])

    # Initialize logger
    lvl = logging.DEBUG if opts.debug else logging.INFO
    global logger
    logger = logging.getLogger("vncauthproxy")
    logger.setLevel(lvl)
    formatter = logging.Formatter(("%(asctime)s %(module)s[%(process)d] "
                                   " %(levelname)s: %(message)s"),
                                  "%Y-%m-%d %H:%M:%S")
    handler = logging.FileHandler(opts.log_file)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    try:
        # Create pidfile
        pidf = pidlockfile.TimeoutPIDLockFile(opts.pid_file, 10)
        # Init ephemeral port pool
        ports = range(opts.min_port, opts.max_port + 1)

        # Init VncAuthProxy class attributes
        VncAuthProxy.server_timeout = opts.server_timeout
        VncAuthProxy.connect_retries = opts.connect_retries
        VncAuthProxy.retry_wait = opts.retry_wait
        VncAuthProxy.connect_timeout = opts.connect_timeout
        VncAuthProxy.ports = ports
        VncAuthProxy.authdb = parse_auth_file(opts.auth_file)
        VncAuthProxy.keyfile = opts.key_file
        VncAuthProxy.certfile = opts.cert_file
        VncAuthProxy.proxy_address = opts.proxy_listen_address
        # Resolved once here; shared by all proxy instances.
        VncAuthProxy.fqdn = socket.getfqdn()

        sockets = get_listening_sockets(opts.listen_port, opts.listen_address,
                                        reuse_addr=True)

        # Identity wrapper unless SSL is enabled below.
        wrap_ssl = lambda sock: sock
        if opts.enable_ssl:
            ssl_prot = ssl.PROTOCOL_TLSv1
            wrap_ssl = lambda sock: ssl.wrap_socket(sock, server_side=True,
                                                    keyfile=opts.key_file,
                                                    certfile=opts.cert_file,
                                                    ssl_version=ssl_prot)

        # Become a daemon:
        # Redirect stdout and stderr to handler.stream to catch
        # early errors in the daemonization process [e.g., pidfile creation]
        # which will otherwise go to /dev/null.
        daemon_context = AllFilesDaemonContext(pidfile=pidf,
                                               umask=0022,
                                               stdout=handler.stream,
                                               stderr=handler.stream,
                                               files_preserve=[handler.stream])

        # Remove any stale PID files, left behind by previous invocations
        if daemon.runner.is_pidfile_stale(pidf):
            logger.warning("Removing stale PID lock file %s", pidf.path)
            pidf.break_lock()

        try:
            daemon_context.open()
        except (AlreadyLocked, LockTimeout):
            raise InternalError(("Failed to lock PID file %s, another "
                                 "instance running?"), pidf.path)
        logger.info("Became a daemon")

        # A fork() has occured while daemonizing,
        # we *must* reinit gevent
        gevent.reinit()

        # Catch signals to ensure graceful shutdown,
        #
        # Uses gevent.signal so the handler fires even during
        # gevent.socket.accept()
        gevent.signal(SIGINT, fatal_signal_handler, "SIGINT")
        gevent.signal(SIGTERM, fatal_signal_handler, "SIGTERM")
    except InternalError as err:
        logger.critical(err)
        sys.exit(1)
    except Exception as err:
        logger.critical("Unexpected error:")
        logger.exception(err)
        sys.exit(1)

    # Accept loop: SystemExit (from the signal handlers) breaks us out;
    # any other error is logged and the loop continues.
    while True:
        try:
            client = None
            rlist, _, _ = select(sockets, [], [])
            for ctrl in rlist:
                client, _ = ctrl.accept()
                client = wrap_ssl(client)
                logger.info("New control connection")
                VncAuthProxy.spawn(logger, client)
            continue
        except Exception as err:
            logger.error("Unexpected error:")
            logger.exception(err)
            if client:
                client.close()
            continue
        except SystemExit:
            break

    # Graceful shutdown: close listeners and the daemon context.
    try:
        logger.info("Closing control sockets")
        while sockets:
            sock = sockets.pop()
            sock.close()
        daemon_context.close()
        sys.exit(0)
    except Exception as err:
        logger.critical("Unexpected error:")
        logger.exception(err)
        sys.exit(1)
# -*- coding: utf-8 -*- """ Test for issue #1526: - dnspython is imported first; - no monkey-patching is done. """ from __future__ import print_function from __future__ import absolute_import import dns # pylint:disable=import-error assert dns import gevent.socket as socket # pylint:disable=consider-using-from-import socket.getfqdn() # create the resolver from gevent.resolver.dnspython import dns as gdns import dns.rdtypes # pylint:disable=import-error assert dns is not gdns, (dns, gdns) assert dns.rdtypes is not gdns.rdtypes import sys print(sorted(sys.modules))
def _init(self):
    """One-time initialization of this WebSocket wrapper's state."""
    # Python-level ID contains all the core details, our own ID and that of the thread (greenlet) that creates us
    _current_thread = current_thread()
    python_id = '{}.{}.{}'.format(hex(id(self)), _current_thread.name, hex(_current_thread.ident))

    # Assign core attributes to this object before calling parent class
    self.python_id = python_id

    # Must be set here and then to True later on because our parent class may already want
    # to accept connections, and we need to postpone their processing until we are initialized fully.
    self._initialized = False

    self.has_session_opened = False
    self._token = None
    self.update_lock = RLock()
    self.ext_client_id = None
    self.ext_client_name = None
    self.connection_time = self.last_seen = datetime.utcnow()
    self.sec_type = self.config.sec_type
    self.pings_missed = 0
    self.pings_missed_threshold = self.config.get('pings_missed_threshold', 5)
    self.user_data = Bunch()  # Arbitrary user-defined data
    self._disconnect_requested = False  # Have we been asked to disconnect this client?

    # Last the we received a ping response (pong) from our peer
    self.ping_last_response_time = None

    #
    # If the peer ever subscribes to a pub/sub topic we will periodically
    # store in the ODB information about the last time the peer either sent
    # or received anything from us. Note that we store it if:
    #
    # * The peer has at least one subscription, and
    # * At least self.pubsub_interact_interval seconds elapsed since the last update
    #
    # And:
    #
    # * The peer received a pub/sub message, or
    # * The peer sent a pub/sub message
    #
    # Or:
    #
    # * The peer did not send or receive anything, but
    # * The peer correctly responds to ping messages
    #
    # Such a logic ensures that we do not overwhelm the database with frequent updates
    # if the peer uses pub/sub heavily - it is costly to do it for each message.
    #
    # At the same time, if the peer does not receive or send anything but it is still connected
    # (because it responds to ping) we set its SQL status too.
    #
    # All of this lets background processes clean up WSX clients that subscribe at one
    # point but they are never seen again, which may (theoretically) happen if a peer disconnects
    # in a way that does not allow for Zato to clean up its subscription status in the ODB.
    #
    self.pubsub_interact_interval = WEB_SOCKET.DEFAULT.INTERACT_UPDATE_INTERVAL
    self.interact_last_updated = None
    self.last_interact_source = None
    self.interact_last_set = None

    # Manages access to service hooks
    if self.config.hook_service:

        self.hook_tool = HookTool(self.config.parallel_server, HookCtx, hook_type_to_method, self.invoke_service)

        self.on_connected_service_invoker = self.hook_tool.get_hook_service_invoker(
            self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_CONNECTED)

        self.on_disconnected_service_invoker = self.hook_tool.get_hook_service_invoker(
            self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_DISCONNECTED)

        self.on_pubsub_response_service_invoker = self.hook_tool.get_hook_service_invoker(
            self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_PUBSUB_RESPONSE)

    else:
        self.hook_tool = None
        self.on_connected_service_invoker = None
        self.on_disconnected_service_invoker = None
        self.on_pubsub_response_service_invoker = None

    # For publish/subscribe over WSX
    self.pubsub_tool = PubSubTool(self.config.parallel_server.worker_store.pubsub, self,
        PUBSUB.ENDPOINT_TYPE.WEB_SOCKETS.id, self.deliver_pubsub_msg)

    # Active WebSocket client ID (WebSocketClient model, web_socket_client.id in SQL)
    self._sql_ws_client_id = None

    # For tokens assigned externally independent of our WS-level self.token.
    # Such tokens will be generated by Vault, for instance.
    self.ext_token = None

    # Drop WSGI keys pointing to complex Python objects such as sockets
    for name in _wsgi_drop_keys:
        self.initial_http_wsgi_environ.pop(name, None)

    # Responses to previously sent requests - keyed by request IDs
    self.responses_received = {}

    # Pre-format both endpoints as 'ip:port' strings for logging.
    _local_address = self.sock.getsockname()
    self._local_address = '{}:{}'.format(_local_address[0], _local_address[1])

    _peer_address = self.sock.getpeername()
    self._peer_address = '{}:{}'.format(_peer_address[0], _peer_address[1])

    self.forwarded_for = self.initial_http_wsgi_environ.get('HTTP_X_FORWARDED_FOR')

    if self.forwarded_for:
        self.forwarded_for_fqdn = socket.getfqdn(self.forwarded_for)
    else:
        self.forwarded_for_fqdn = WEB_SOCKET.DEFAULT.FQDN_UNKNOWN

    # Reverse-DNS of the peer is best-effort; failures are only logged.
    _peer_fqdn = WEB_SOCKET.DEFAULT.FQDN_UNKNOWN

    try:
        self._peer_host = socket.gethostbyaddr(_peer_address[0])[0]
        _peer_fqdn = socket.getfqdn(self._peer_host)
    except Exception:
        logger.warn(format_exc())
    finally:
        self._peer_fqdn = _peer_fqdn

    self.peer_conn_info_pretty = self.get_peer_info_pretty()

    # Message parser selected once, based on the configured data format.
    self._parse_func = {
        DATA_FORMAT.JSON: self.parse_json,
        DATA_FORMAT.XML: self.parse_xml,
    }[self.config.data_format]

    # All set, we can process connections now
    self._initialized = True
def __call__(self, env, read, write):
    # WSGI adapter: translate a csgi request (env/read/write) into a
    # WSGI environ, invoke self.handler and stream the response out.
    sock = env['socket']
    if not hasattr(sock, 'fqdn'):
        # First request on this socket: cache resolved name and port.
        sock.fqdn = getattr(sock, 'host', '')
        sock.port = getattr(sock, 'port', '')
        try:
            sock.fqdn = socket.getfqdn(sock.fqdn)
        except socket.error:
            # @todo: error silented
            pass
    env_http = env['http']
    headers = env_http['request']['header']
    environ =\
        { 'GATEWAY_INTERFACE': 'CGI/1.1'
        , 'SERVER_PROTOCOL': env['http']['request_version']
        , 'SERVER_SOFTWARE': self._environ_software
        , 'SERVER_NAME': sock.fqdn
        , 'SERVER_PORT': sock.port
        , 'REMOTE_ADDR': env['remoteclient']['address'][0]
        , 'wsgi.version': (1, 0)
        , 'wsgi.multithread': False
        , 'wsgi.multiprocess': False
        , 'wsgi.run_once': False
        , 'wsgi.errors': sys.stderr
        , 'wsgi.url_scheme': 'http'  # @todo: https ( socket first )
        , 'csgi.env': env }
    environ['SCRIPT_NAME'] = self.approot or env.get('route', {}).get(
        'approot', '')
    environ['wsgi.input'] = Input(
        read, environ.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked')
    # PATH_INFO is the request path with the application root stripped.
    environ['PATH_INFO'] = env_http['path'][len(environ['SCRIPT_NAME']):]
    environ['QUERY_STRING'] = env_http['query']
    environ['REQUEST_METHOD'] = env_http['method']
    # NOTE(review): typeheader/getheader imply a Python 2 mimetools-style
    # message object - confirm before porting.
    if headers.typeheader is not None:
        environ['CONTENT_TYPE'] = headers.typeheader
    length = headers.getheader('content-length')
    if length:
        environ['CONTENT_LENGTH'] = length
    # Copy remaining headers as HTTP_* keys, joining repeats
    # (cookies with '; ', everything else with ',').
    for (key, value) in headers.items():
        key = key.replace('-', '_').upper()
        if key not in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            value = value.strip()
            key = 'HTTP_' + key
            if key in environ:
                if 'COOKIE' in key:
                    environ[key] += '; ' + value
                else:
                    environ[key] += ',' + value
            else:
                environ[key] = value
    result = self.handler\
        ( environ
        , lambda status, headers, exc_info=None:
            self._start_response(env, write, status, headers, exc_info)
        )
    # Stream non-empty chunks back to the client.
    for data in result:
        if data:
            write(data)
def __call__( self, env, read, write ):
    # WSGI adapter: translate a csgi request (env/read/write) into a
    # WSGI environ, invoke self.handler and stream the response out.
    sock = env['socket']
    if not hasattr(sock,'fqdn'):
        # First request on this socket: cache resolved name and port.
        sock.fqdn = getattr( sock, 'host', '' )
        sock.port = getattr( sock, 'port', '' )
        try:
            sock.fqdn = socket.getfqdn( sock.fqdn )
        except socket.error:
            # Name resolution failed - keep the raw host string.
            pass
    env_http = env['http']
    headers = env_http['request']['header']
    environ =\
        { 'GATEWAY_INTERFACE': 'CGI/1.1'
        , 'SERVER_PROTOCOL': env['http']['request_version']
        , 'SERVER_SOFTWARE': self._environ_software
        , 'SERVER_NAME': sock.fqdn
        , 'SERVER_PORT': sock.port
        , 'REMOTE_ADDR': env['remoteclient']['address'][0]
        , 'wsgi.version': (1, 0)
        , 'wsgi.multithread': False
        , 'wsgi.multiprocess': False
        , 'wsgi.run_once': False
        , 'wsgi.errors': sys.stderr
        , 'wsgi.url_scheme': 'http'  # TODO: https ( socket first )
        , 'csgi.env': env }
    environ['SCRIPT_NAME'] = self.approot or env.get('route',{}).get('approot','')
    environ['wsgi.input'] = Input( read, environ\
        .get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked' )
    # PATH_INFO is the request path with the application root stripped.
    environ['PATH_INFO'] = env_http['path'][len(environ['SCRIPT_NAME']):]
    environ['QUERY_STRING'] = env_http['query']
    environ['REQUEST_METHOD'] = env_http['method']
    # NOTE(review): typeheader/getheader imply a Python 2 mimetools-style
    # message object - confirm before porting.
    if headers.typeheader is not None:
        environ['CONTENT_TYPE'] = headers.typeheader
    length = headers.getheader('content-length')
    if length:
        environ['CONTENT_LENGTH'] = length
    # Copy remaining headers as HTTP_* keys, joining repeats
    # (cookies with '; ', everything else with ',').
    for (key,value) in headers.items():
        key = key.replace('-', '_').upper()
        if key not in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            value = value.strip()
            key = 'HTTP_' + key
            if key in environ:
                if 'COOKIE' in key:
                    environ[key] += '; ' + value
                else:
                    environ[key] += ',' + value
            else:
                environ[key] = value
    result = self.handler( environ, lambda status, headers, exc_info=None\
        : self._start_response( env, write, status, headers, exc_info ) )
    # Stream non-empty chunks back to the client.
    for data in result:
        if data:
            write(data)