Ejemplo n.º 1
0
    def __init__(self, configuration, poller):
        """Prepare the manager that spawns and tracks redirector worker processes."""
        self.configuration = configuration
        self.poller = poller

        # bounds on how many redirector workers may run concurrently
        self.low = configuration.redirector.minimum
        self.high = configuration.redirector.maximum

        # requests we do not have the resources to process right away wait here
        self.queue = Queue()

        self.nextid = 1         # unique id handed to the next spawned worker
        self.worker = {}        # worker tasks for each spawned child
        self.processes = {}     # worker tasks keyed by pollable file descriptor
        self.available = set()  # workers idle and ready to take a new request
        self.active = {}        # workers busy waiting on their spawned process
        self.stopping = set()   # workers to retire as soon as they go inactive

        self.redirector_factory = RedirectorFactory(
            configuration,
            configuration.redirector.program,
            configuration.redirector.protocol,
        )

        self.log = Logger('manager', configuration.log.manager)
Ejemplo n.º 2
0
	def __init__ (self, configuration, name, program, protocol):
		"""Set up one redirector worker and, when configured, fork its helper process."""
		self.configuration = configuration
		self.http_parser = self.HTTPParser(configuration)
		self.tls_parser = self.TLSParser(configuration)

		# classification requires both a helper program and the enable flag
		self.enabled = bool(program is not None) and configuration.redirector.enable
		self._transparent = configuration.http.transparent

		self.log = Logger('worker ' + str(name), configuration.log.worker)
		self.usage = UsageLogger('usage', configuration.log.worker)
		self.response_factory = self.ResponseFactory()
		self.child_factory = self.ChildFactory(configuration, name)

		self.wid = name               # a unique name for this worker
		self.creation = time.time()   # when the worker was created
		self.program = program        # the squid redirector program to fork
		self.running = True           # cleared when this worker must stop
		self.stats_timestamp = None   # time of the latest outstanding stats request

		self._proxy = 'ExaProxy-%s-id-%d' % (configuration.proxy.version,os.getpid())

		# Do not move: the fork must happen AFTER all the setup above
		universal = configuration.redirector.protocol == 'url'
		if program:
			self.process = self.child_factory.createProcess(self.program, universal=universal)
		else:
			self.process = None
Ejemplo n.º 3
0
	def __init__ (self, configuration, querier, decider, logger, poller):
		"""Wire together the collaborators of the redirector process."""
		self.querier = querier   # incoming requests from the proxy
		self.decider = decider   # decides how each request should be handled
		self.logger = logger     # log writing interfaces
		self.poller = poller
		self.running = True
		# a fresh Logger: this runs in a different process than the rest of the proxy
		self.log = Logger('redirector', configuration.log.supervisor)
Ejemplo n.º 4
0
	def __init__(self, name, poller, read_name, max_clients):
		"""Initialise a listening server accepting at most *max_clients* clients."""
		self.name = name
		self.poller = poller
		self.read_name = read_name
		self.max_clients = max_clients

		self.socks = {}
		self.binding = set()
		self.client_count = 0
		self.saturated = False  # receiving more connections than we can handle
		self.serving = True     # we are currently listening
		# NOTE(review): 'configuration' is not a parameter or local here - this
		# depends on a module-level 'configuration' in the enclosing file; confirm.
		self.log = Logger('server', configuration.log.server)
		self.log.info('server [%s] accepting up to %d clients' % (name, max_clients))
Ejemplo n.º 5
0
 def __init__(self, poller, configuration):
     """Track open client connections and how much data they have sent.

     Python 2 code: the 0L literals are long integers.
     """
     self.total_sent4 = 0L  # cumulative counter for IPv4 traffic
     self.total_sent6 = 0L  # cumulative counter for IPv6 traffic
     self.total_requested = 0L
     self.norequest = TimeCache(configuration.http.idle_connect)
     self.bysock = {}       # connections keyed by socket
     self.byname = {}       # connections keyed by name
     self.buffered = []     # connections with pending data to send
     self._nextid = 0       # id for the next accepted client
     self.poller = poller
     self.log = Logger('client', configuration.log.client)
     self.proxied = configuration.http.proxied
     self.max_buffer = configuration.http.header_size  # header buffering limit
Ejemplo n.º 6
0
	def __init__ (self,configuration,poller):
		"""Create the thread-based redirector worker manager."""
		self.configuration = configuration
		self.poller = poller   # poller interface that checks for events on sockets

		# worker pool bounds, and the program speaking the squid redirector API
		self.low = configuration.redirector.minimum
		self.high = configuration.redirector.maximum
		self.program = configuration.redirector.program

		self.nextid = 1        # incremental number naming the next worker
		self.queue = Queue()   # HTTP headers waiting to be processed
		self.worker = {}       # our worker threads
		self.closing = set()   # workers that are currently closing
		self.running = True    # cleared once the manager stops

		self.log = Logger('manager', configuration.log.manager)
Ejemplo n.º 7
0
	def __init__(self, supervisor, configuration):
		self.total_sent4 = 0L
		self.total_sent6 = 0L
		self.opening = {}
		self.established = {}
		self.byclientid = {}
		self.buffered = []
		self.retry = []
		self.configuration = configuration
		self.supervisor = supervisor

		self.poller = supervisor.poller
		self.log = Logger('download', configuration.log.download)

		self.location = os.path.realpath(os.path.normpath(configuration.web.html))
		self.page = supervisor.page
		self._header = {}
Ejemplo n.º 8
0
    def __init__(self, configuration, web, proxy, decider, content, client,
                 resolver, logger, usage, poller):
        self.web = web  # Manage listening web sockets
        self.proxy = proxy  # Manage listening proxy sockets
        self.decider = decider  # Task manager for handling child decider processes
        self.content = content  # The Content Download manager
        self.client = client  # Currently open client connections
        self.resolver = resolver  # The DNS query manager
        self.poller = poller  # Interface to the poller
        self.logger = logger  # Log writing interfaces
        self.usage = usage  # Request logging
        self.running = True  # Until we stop we run :)
        self.nb_events = 0L  # Number of events received
        self.nb_loops = 0L  # Number of loop iteration
        self.events = []  # events so we can report them once in a while

        self.log = Logger('supervisor', configuration.log.supervisor)
Ejemplo n.º 9
0
	def __init__(self, poller, configuration):
		self.total_sent4 = 0L
		self.total_sent6 = 0L
		self.total_requested = 0L
		self.norequest = TimeCache(configuration.http.idle_connect)
		self.bysock = {}
		self.byname = {}
		self.buffered = []
		self._nextid = 0
		self.poller = poller
		self.log = Logger('client', configuration.log.client)
		self.http_max_buffer = configuration.http.header_size
		self.icap_max_buffer = configuration.icap.header_size
		self.tls_max_buffer = configuration.tls.header_size
		self.passthrough_max_buffer = 0
		self.proxied = {
			'proxy' : configuration.http.proxied,
			'icap'  : configuration.icap.proxied,
			'tls'   : configuration.tls.proxied,
		}
Ejemplo n.º 10
0
    def __init__(self, poller, configuration, max_workers):
        """Set up DNS resolution: one UDP client plus on-demand TCP workers."""
        self.poller = poller
        self.configuration = configuration

        self.resolver_factory = self.resolverFactory(configuration)

        # the actual work is done in this UDP worker
        self.worker = self.resolver_factory.createUDPClient()

        # all currently active clients (the one UDP client and any TCP ones)
        self.workers = {self.worker.socket: self.worker}
        self.poller.addReadSocket('read_resolver', self.worker.socket)

        self.clients = {}    # clients currently expecting results: client_id -> identifier
        self.resolving = {}  # in-flight queries, keyed on identifier / worker id
        self.sending = {}    # TCP workers that have not yet sent a complete request

        # maximum number of cached entries (1024 DNS lookups per second!);
        # assuming a generous 1k per entry that is about 20MB of memory, which
        # at the default 900 seconds of cache life means ~22 new hosts per second
        self.max_entries = 1024 * 20

        self.active = []  # current queries and when they were started

        self.cache = {}
        self.cached = deque()

        self.max_workers = max_workers
        self.worker_count = len(self.workers)  # starts at one: the UDP client

        self.waiting = []

        self.log = Logger('resolver', configuration.log.resolver)
        self.chained = {}
Ejemplo n.º 11
0
    def __init__(self, configuration, name, request_box, program):
        """Set up a redirector worker thread and fork its helper process.

        The worker takes HTTP headers from *request_box*, classifies them
        according to the configured protocol ('url' helper program or an
        icap:// service) and writes results back to the main thread over a pipe.
        """
        self.configuration = configuration
        self.icap_parser = self.ICAPParser(configuration)
        self.enabled = configuration.redirector.enable
        self.protocol = configuration.redirector.protocol
        self._transparent = configuration.http.transparent
        self.log = Logger('worker ' + str(name), configuration.log.worker)
        self.usage = UsageLogger('usage', configuration.log.worker)

        # 'url' helpers speak the line-based squid redirector protocol
        self.universal = self.protocol == 'url'
        # host part of an icap:// uri, or the empty string for other protocols
        if self.protocol.startswith('icap://'):
            self.icap = self.protocol[len('icap://'):].split('/')[0]
        else:
            self.icap = ''

        # pipe for communication with the main thread; unbuffered (Python 2)
        r, w = os.pipe()
        self.response_box_write = os.fdopen(w, 'w', 0)  # results are written here
        self.response_box_read = os.fdopen(r, 'r', 0)   # read from the main thread

        self.wid = name                 # a unique name
        self.creation = time.time()     # when the thread was created
        self.request_box = request_box  # queue with HTTP headers to process

        self.program = program  # the squid redirector program to fork
        self.running = True     # the thread is active

        self.stats_timestamp = None  # time of the most recent outstanding request to generate stats

        self._proxy = 'ExaProxy-%s-id-%d' % (configuration.proxy.version,
                                             os.getpid())

        # pick the classifier; NOTE: self.classify stays unset for any other
        # protocol value, preserving the original behaviour
        if self.protocol == 'url':
            self.classify = self._classify_url
        elif self.protocol.startswith('icap://'):
            self.classify = self._classify_icap

        # Do not move, we need the forking AFTER the setup
        self.process = self._createProcess()  # the forked program to handle classification
        Thread.__init__(self)
Ejemplo n.º 12
0
 def __init__(self, supervisor):
     """Keep references to the supervisor and its monitor for the web interface."""
     self.supervisor = supervisor
     self.monitor = supervisor.monitor
     self.email_sent = False  # presumably set once a notification email goes out - confirm in callers
     self.log = Logger('web', supervisor.configuration.log.web)
Ejemplo n.º 13
0
	def __init__ (self,configuration):
		"""Build the supervisor: loggers, daemon plumbing, the poller and its
		event categories, the managers and servers, the reactor, and finally
		the signal handlers - in dependency order."""
		# NOTE(review): the 'configuration' parameter is immediately shadowed by
		# a fresh load() - the passed-in argument is effectively ignored
		configuration = load()
		self.configuration = configuration

		# Only here so the introspection code can find them
		self.log = Logger('supervisor', configuration.log.supervisor)
		self.log.error('Starting exaproxy version %s' % configuration.proxy.version)

		self.signal_log = Logger('signal', configuration.log.signal)
		self.log_writer = SysLogWriter('log', configuration.log.destination, configuration.log.enable, level=configuration.log.level)
		self.usage_writer = UsageWriter('usage', configuration.usage.destination, configuration.usage.enable)

		self.log_writer.setIdentifier(configuration.daemon.identifier)
		#self.usage_writer.setIdentifier(configuration.daemon.identifier)

		if configuration.debug.log:
			self.log_writer.toggleDebug()
			self.usage_writer.toggleDebug()

		self.log.error('python version %s' % sys.version.replace(os.linesep,' '))
		self.log.debug('starting %s' % sys.argv[0])

		self.pid = PID(self.configuration)

		self.daemon = Daemon(self.configuration)
		self.poller = Poller(self.configuration.daemon)

		# register the event categories the reactor will poll on
		self.poller.setupRead('read_proxy')           # Listening proxy sockets
		self.poller.setupRead('read_web')             # Listening webserver sockets
		self.poller.setupRead('read_icap')             # Listening icap sockets
		self.poller.setupRead('read_workers')         # Pipes carrying responses from the child processes
		self.poller.setupRead('read_resolver')        # Sockets currently listening for DNS responses

		self.poller.setupRead('read_client')          # Active clients
		self.poller.setupRead('opening_client')       # Clients we have not yet read a request from
		self.poller.setupWrite('write_client')        # Active clients with buffered data to send
		self.poller.setupWrite('write_resolver')      # Active DNS requests with buffered data to send

		self.poller.setupRead('read_download')        # Established connections
		self.poller.setupWrite('write_download')      # Established connections we have buffered data to send to
		self.poller.setupWrite('opening_download')    # Opening connections

		self.monitor = Monitor(self)
		self.page = Page(self)
		self.manager = RedirectorManager(
			self.configuration,
			self.poller,
		)
		self.content = ContentManager(self,configuration)
		self.client = ClientManager(self.poller, configuration)
		self.resolver = ResolverManager(self.poller, self.configuration, configuration.dns.retries*10)
		self.proxy = Server('http proxy',self.poller,'read_proxy', configuration.http.connections)
		self.web = Server('web server',self.poller,'read_web', configuration.web.connections)
		self.icap = Server('icap server',self.poller,'read_icap', configuration.icap.connections)

		self.reactor = Reactor(self.configuration, self.web, self.proxy, self.icap, self.manager, self.content, self.client, self.resolver, self.log_writer, self.usage_writer, self.poller)

		# runtime flags toggled later by signal handlers and commands
		self._shutdown = True if self.daemon.filemax == 0 else False  # stop the program
		self._softstop = False  # stop once all current connection have been dealt with
		self._reload = False  # unimplemented
		self._toggle_debug = False  # start logging a lot
		self._decrease_spawn_limit = 0
		self._increase_spawn_limit = 0
		self._refork = False  # unimplemented
		self._pdb = False  # turn on pdb debugging
		self._listen = None  # listening change ? None: no, True: listen, False: stop listeing
		self.wait_time = 5.0  # how long do we wait at maximum once we have been soft-killed
		self.local = set()  # what addresses are on our local interfaces

		self.interfaces()

		signal.signal(signal.SIGQUIT, self.sigquit)
		signal.signal(signal.SIGINT, self.sigterm)
		signal.signal(signal.SIGTERM, self.sigterm)
		# signal.signal(signal.SIGABRT, self.sigabrt)
		# signal.signal(signal.SIGHUP, self.sighup)

		signal.signal(signal.SIGTRAP, self.sigtrap)

		signal.signal(signal.SIGUSR1, self.sigusr1)
		signal.signal(signal.SIGUSR2, self.sigusr2)
		signal.signal(signal.SIGTTOU, self.sigttou)
		signal.signal(signal.SIGTTIN, self.sigttin)

		signal.signal(signal.SIGALRM, self.sigalrm)

		# make sure we always have data in history
		# (done in zero for dependencies reasons)
		self.monitor.zero()
Ejemplo n.º 14
0
# encoding: utf-8
"""
async/__init__.py

Created by David Farrar on 2012-01-31.
Copyright (c) 2011-2013  Exa Networks. All rights reserved.
"""

import sys
import select

from exaproxy.util.log.logger import Logger
from exaproxy.configuration import load

# module-level configuration and logger shared by this module's functions
configuration = load()
log = Logger('supervisor', configuration.log.supervisor)


def Poller(configuration, speed=None):
    reactor = configuration.reactor

    if reactor == 'best':
        if sys.platform.startswith('linux'):
            configuration.reactor = 'epoll'
        elif sys.platform.startswith('freebsd'):
            configuration.reactor = 'kqueue'
        elif sys.platform.startswith('darwin'):
            configuration.reactor = 'kqueue'
        else:
            log.error(
                'we could not autodetect an high performance reactor for your OS'
Ejemplo n.º 15
0
Created by Thomas Mangin on 2011-11-30.
Copyright (c) 2011-2013  Exa Networks. All rights reserved.
"""

import socket
import errno

from exaproxy.util.log.logger import Logger
from exaproxy.network.errno_list import errno_block
from exaproxy.configuration import load

# Linux socket option number for IP_TRANSPARENT - hard-coded because the
# socket module does not expose it in the Python versions this targets
IP_TRANSPARENT = 19

# module-level configuration and logger shared by this module's functions
configuration = load()
log = Logger('server', configuration.log.server)

def isipv4(address):
	"""Return True when *address* is a valid IPv4 address string."""
	try:
		socket.inet_pton(socket.AF_INET, address)
	except socket.error:
		return False
	return True

def isipv6(address):
	"""Return True when *address* is a valid IPv6 address string."""
	try:
		socket.inet_pton(socket.AF_INET6, address)
	except socket.error:
		return False
	return True
Ejemplo n.º 16
0
 def __init__(self, configuration, name):
     """Create a per-worker logger named after this child."""
     self.log = Logger('worker ' + str(name), configuration.log.worker)
Ejemplo n.º 17
0
    def __init__(self, configuration):
        """Build the supervisor: loggers, daemon plumbing, the poller and its
        event categories, servers and managers, then fork the redirector,
        start threads, install signal handlers and prime the statistics.

        The statement order is deliberate: privileges are dropped before the
        redirector is forked, and threads are created only after all forking.
        """
        self.configuration = configuration

        # Only here so the introspection code can find them
        self.log = Logger('supervisor', configuration.log.supervisor)
        self.log.error('Starting exaproxy version %s' %
                       configuration.proxy.version)

        self.signal_log = Logger('signal', configuration.log.signal)
        self.log_writer = SysLogWriter('log',
                                       configuration.log.destination,
                                       configuration.log.enable,
                                       level=configuration.log.level)
        self.usage_writer = UsageWriter('usage',
                                        configuration.usage.destination,
                                        configuration.usage.enable)

        # Python 2 API: flush pending log messages at interpreter exit
        sys.exitfunc = self.log_writer.writeMessages

        self.log_writer.setIdentifier(configuration.daemon.identifier)
        #self.usage_writer.setIdentifier(configuration.daemon.identifier)

        if configuration.debug.log:
            self.log_writer.toggleDebug()
            self.usage_writer.toggleDebug()

        self.log.error('python version %s' %
                       sys.version.replace(os.linesep, ' '))
        self.log.debug('starting %s' % sys.argv[0])

        self.pid = PID(self.configuration)

        self.daemon = Daemon(self.configuration)
        self.poller = Poller(self.configuration.daemon)

        # register the event categories the reactor will poll on
        self.poller.setupRead('read_proxy')  # Listening proxy sockets
        self.poller.setupRead('read_web')  # Listening webserver sockets
        self.poller.setupRead('read_icap')  # Listening icap sockets
        self.poller.setupRead('read_tls')  # Listening tls sockets
        self.poller.setupRead('read_passthrough')  # Listening raw data sockets
        self.poller.setupRead(
            'read_redirector'
        )  # Pipes carrying responses from the redirector process
        self.poller.setupRead(
            'read_resolver')  # Sockets currently listening for DNS responses

        self.poller.setupRead('read_client')  # Active clients
        self.poller.setupRead(
            'opening_client')  # Clients we have not yet read a request from
        self.poller.setupWrite(
            'write_client')  # Active clients with buffered data to send
        self.poller.setupWrite(
            'write_resolver')  # Active DNS requests with buffered data to send

        self.poller.setupRead('read_download')  # Established connections
        self.poller.setupWrite(
            'write_download'
        )  # Established connections we have buffered data to send to
        self.poller.setupWrite('opening_download')  # Opening connections

        self.poller.setupRead('read_interrupt')  # Scheduled events
        self.poller.setupRead(
            'read_control'
        )  # Responses from commands sent to the redirector process

        self.monitor = Monitor(self)
        self.page = Page(self)
        self.content = ContentManager(self, configuration)
        self.client = ClientManager(self.poller, configuration)
        self.resolver = ResolverManager(self.poller, self.configuration,
                                        configuration.dns.retries * 10)
        self.proxy = Server('http proxy', self.poller, 'read_proxy',
                            configuration.http)
        self.web = Server('web server', self.poller, 'read_web',
                          configuration.web)
        self.icap = Server('icap server', self.poller, 'read_icap',
                           configuration.icap)
        self.tls = Server('tls server', self.poller, 'read_tls',
                          configuration.tls)
        self.passthrough = InterceptServer('passthrough server', self.poller,
                                           'read_passthrough',
                                           configuration.passthrough)

        # runtime flags toggled later by signal handlers and commands
        self._shutdown = True if self.daemon.filemax == 0 else False  # stop the program
        self._softstop = False  # stop once all current connection have been dealt with
        self._reload = False  # unimplemented
        self._toggle_debug = False  # start logging a lot
        self._decrease_spawn_limit = 0
        self._increase_spawn_limit = 0
        self._refork = False  # unimplemented
        self._pdb = False  # turn on pdb debugging
        self._listen = None  # listening change ? None: no, True: listen, False: stop listeing
        self.wait_time = 5.0  # how long do we wait at maximum once we have been soft-killed
        self.local = set()  # what addresses are on our local interfaces

        if not self.initialise():
            self._shutdown = True

        elif self.daemon.drop_privileges():
            self.log.critical(
                'Could not drop privileges to \'%s\'. Refusing to run as root'
                % self.daemon.user)
            self.log.critical(
                'Set the environment value USER to change the unprivileged user'
            )
            self._shutdown = True

        # fork the redirector process before performing any further setup
        redirector = fork_redirector(self.poller, self.configuration)

        # use simple blocking IO for communication with the redirector process
        self.redirector = redirector_message_thread(redirector)

        # NOTE: create threads _after_ all forking is done

        # regularly interrupt the reactor for maintenance
        self.interrupt_scheduler = alarm_thread(self.poller, self.alarm_time)

        self.reactor = Reactor(self.configuration, self.web, self.proxy,
                               self.passthrough, self.icap, self.tls,
                               self.redirector, self.content, self.client,
                               self.resolver, self.log_writer,
                               self.usage_writer, self.poller)

        self.interfaces()

        signal.signal(signal.SIGQUIT, self.sigquit)
        signal.signal(signal.SIGINT, self.sigterm)
        signal.signal(signal.SIGTERM, self.sigterm)
        # signal.signal(signal.SIGABRT, self.sigabrt)
        # signal.signal(signal.SIGHUP, self.sighup)

        signal.signal(signal.SIGTRAP, self.sigtrap)

        signal.signal(signal.SIGUSR1, self.sigusr1)
        signal.signal(signal.SIGUSR2, self.sigusr2)
        signal.signal(signal.SIGTTOU, self.sigttou)
        signal.signal(signal.SIGTTIN, self.sigttin)

        # make sure we always have data in history
        # (done in zero for dependencies reasons)

        if self._shutdown is False:
            self.redirector.requestStats()
            command, control_data = self.redirector.readResponse()
            stats_data = control_data if command == 'STATS' else None

            stats = self.monitor.statistics(stats_data)
            ok = self.monitor.zero(stats)

            if ok:
                self.redirector.requestStats()

            else:
                self._shutdown = True