def test_check_badHMACkey(self):
    """A challenge with a bad HMAC key should return False."""
    hmacKeyBad = crypto.getKey('test_gimpCaptcha_badHMACkey')
    c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                            self.cacheDir)
    image, challenge = c.get()
    self.assertEquals(
        c.check(challenge, c.answer, c.secretKey, hmacKeyBad),
        False)
def setUp(self):
    here = os.getcwd()
    self.topDir = here.rstrip('_trial_temp')
    self.cacheDir = os.path.join(self.topDir, 'captchas')
    self.badCacheDir = os.path.join(here, 'capt')
    # Get keys for testing or create them:
    self.sekrit, self.publik = crypto.getRSAKey('test_gimpCaptcha_RSAkey')
    self.hmacKey = crypto.getKey('test_gimpCaptcha_HMACkey')
def setUp(self):
    here = os.getcwd()
    self.topDir = here.rstrip("_trial_temp")
    self.cacheDir = os.path.join(self.topDir, "captchas")
    self.badCacheDir = os.path.join(here, "capt")
    # Get keys for testing or create them:
    self.sekrit, self.publik = crypto.getRSAKey("test_gimpCaptcha_RSAkey")
    self.hmacKey = crypto.getKey("test_gimpCaptcha_HMACkey")
def test_getKey_keyexists(self):
    """Write the example key to a file and test reading it back."""
    filename = self.mktemp()
    with open(filename, 'wb') as fh:
        fh.write(SEKRIT_KEY)
        fh.flush()

    key = crypto.getKey(filename)
    self.failUnlessIsInstance(key, basestring,
                              "key isn't a string! type=%r" % type(key))
    self.assertEqual(SEKRIT_KEY, key,
                     """The example key and the one read from file differ!
                     key (in hex): %s
                     SEKRIT_KEY (in hex): %s"""
                     % (key.encode('hex'), SEKRIT_KEY.encode('hex')))
def test_getKey_keyexists(self):
    """Write the example key to a file and test reading it back."""
    filename = self.mktemp()
    with open(filename, 'wb') as fh:
        fh.write(SEKRIT_KEY)
        fh.flush()

    key = crypto.getKey(filename)
    self.failUnlessIsInstance(key, bytes,
                              "key isn't bytes! type=%r" % type(key))
    self.assertEqual(SEKRIT_KEY, key,
                     """The example key and the one read from file differ!
                     key (in hex): %s
                     SEKRIT_KEY (in hex): %s"""
                     % (binascii.hexlify(key).decode('utf-8'),
                        binascii.hexlify(SEKRIT_KEY).decode('utf-8')))
def addWebServer(cfg, dist, sched):
    """Set up a web server.

    :param cfg: A configuration object from :mod:`bridgedb.Main`. Currently,
        we use these options::
            HTTPS_N_BRIDGES_PER_ANSWER
            HTTP_UNENCRYPTED_PORT
            HTTP_UNENCRYPTED_BIND_IP
            HTTP_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_PORT
            HTTPS_BIND_IP
            HTTPS_USE_IP_FROM_FORWARDED_HEADER
            RECAPTCHA_ENABLED
            RECAPTCHA_PUB_KEY
            RECAPTCHA_PRIV_KEY
            RECAPTCHA_REMOTEIP
            GIMP_CAPTCHA_ENABLED
            GIMP_CAPTCHA_DIR
    :type dist: :class:`bridgedb.Dist.IPBasedDistributor`
    :param dist: A bridge distributor.
    :type sched: :class:`bridgedb.Time.IntervalSchedule`
    :param sched: DOCDOC
    """
    httpdist = resource.Resource()
    httpdist.putChild('', WebRoot())
    httpdist.putChild('robots.txt',
                      static.File(os.path.join(template_root, 'robots.txt')))
    httpdist.putChild('assets',
                      static.File(os.path.join(template_root, 'assets/')))
    httpdist.putChild('options', WebResourceOptions())

    bridgesResource = WebResourceBridges(
        dist, sched, cfg.HTTPS_N_BRIDGES_PER_ANSWER,
        cfg.HTTP_USE_IP_FROM_FORWARDED_HEADER,
        includeFingerprints=cfg.HTTPS_INCLUDE_FINGERPRINTS)

    if cfg.RECAPTCHA_ENABLED:
        protected = ReCaptchaProtectedResource(
            recaptchaPrivKey=cfg.RECAPTCHA_PRIV_KEY,
            recaptchaPubKey=cfg.RECAPTCHA_PUB_KEY,
            remoteip=cfg.RECAPTCHA_REMOTEIP,
            useForwardedHeader=cfg.HTTP_USE_IP_FROM_FORWARDED_HEADER,
            protectedResource=bridgesResource)
        httpdist.putChild('bridges', protected)
    elif cfg.GIMP_CAPTCHA_ENABLED:
        # Get the HMAC secret key for CAPTCHA challenges and create a new key
        # from it for use on the server:
        captchaKey = crypto.getKey(cfg.GIMP_CAPTCHA_HMAC_KEYFILE)
        hmacKey = crypto.getHMAC(captchaKey, "Captcha-Key")
        # Load or create our encryption keys:
        secretKey, publicKey = crypto.getRSAKey(cfg.GIMP_CAPTCHA_RSA_KEYFILE)
        protected = GimpCaptchaProtectedResource(
            secretKey=secretKey,
            publicKey=publicKey,
            hmacKey=hmacKey,
            captchaDir=cfg.GIMP_CAPTCHA_DIR,
            useForwardedHeader=cfg.HTTP_USE_IP_FROM_FORWARDED_HEADER,
            protectedResource=bridgesResource)
        httpdist.putChild('bridges', protected)
    else:
        httpdist.putChild('bridges', bridgesResource)

    site = server.Site(httpdist)

    if cfg.HTTP_UNENCRYPTED_PORT:
        ip = cfg.HTTP_UNENCRYPTED_BIND_IP or ""
        try:
            reactor.listenTCP(cfg.HTTP_UNENCRYPTED_PORT, site, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)

    if cfg.HTTPS_PORT:
        from twisted.internet.ssl import DefaultOpenSSLContextFactory
        #from OpenSSL.SSL import SSLv3_METHOD
        ip = cfg.HTTPS_BIND_IP or ""
        factory = DefaultOpenSSLContextFactory(cfg.HTTPS_KEY_FILE,
                                               cfg.HTTPS_CERT_FILE)
        try:
            reactor.listenSSL(cfg.HTTPS_PORT, site, factory, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)

    return site
def test_getKey_tmpfile(self):
    """Test retrieving the secret_key from a new tmpfile."""
    filename = self.mktemp()
    key = crypto.getKey(filename)
    self.failUnlessIsInstance(key, bytes,
                              "key isn't bytes! type=%r" % type(key))
def test_getKey_nokey(self):
    """Test retrieving the secret_key from an empty file."""
    filename = os.path.join(os.getcwd(), 'sekrit')
    key = crypto.getKey(filename)
    self.failUnlessIsInstance(key, bytes,
                              "key isn't bytes! type=%r" % type(key))
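# The getKey tests above (and the "Load the master key, or create a new one"
# comments further down) only assume that crypto.getKey() returns key material
# whether or not the named file already exists.  A minimal sketch of that
# assumed behaviour follows; the real bridgedb.crypto.getKey() may differ in
# key length and error handling, and the 32-byte os.urandom() key here is an
# illustrative assumption, not taken from the source.
import os

def getKey_sketch(filename):
    """Read a secret key from ``filename``, creating one if it is missing."""
    try:
        with open(filename, 'rb') as fh:
            key = fh.read()
    except (IOError, OSError):
        key = os.urandom(32)  # assumed key size
        with open(filename, 'wb') as fh:
            fh.write(key)
    return key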
def addWebServer(cfg, dist):
    """Set up a web server for HTTP(S)-based bridge distribution.

    :type cfg: :class:`bridgedb.persistent.Conf`
    :param cfg: A configuration object from :mod:`bridgedb.Main`. Currently,
        we use these options::
            HTTP_UNENCRYPTED_PORT
            HTTP_UNENCRYPTED_BIND_IP
            HTTP_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_N_BRIDGES_PER_ANSWER
            HTTPS_INCLUDE_FINGERPRINTS
            HTTPS_KEY_FILE
            HTTPS_CERT_FILE
            HTTPS_PORT
            HTTPS_BIND_IP
            HTTPS_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_ROTATION_PERIOD
            RECAPTCHA_ENABLED
            RECAPTCHA_PUB_KEY
            RECAPTCHA_SEC_KEY
            RECAPTCHA_REMOTEIP
            GIMP_CAPTCHA_ENABLED
            GIMP_CAPTCHA_DIR
            GIMP_CAPTCHA_HMAC_KEYFILE
            GIMP_CAPTCHA_RSA_KEYFILE
    :type dist: :class:`bridgedb.Dist.IPBasedDistributor`
    :param dist: A bridge distributor.
    :raises SystemExit: if the servers cannot be started.
    :rtype: :api:`twisted.web.server.Site`
    :returns: A webserver.
    """
    captcha = None
    fwdHeaders = cfg.HTTP_USE_IP_FROM_FORWARDED_HEADER
    numBridges = cfg.HTTPS_N_BRIDGES_PER_ANSWER
    fprInclude = cfg.HTTPS_INCLUDE_FINGERPRINTS

    logging.info("Starting web servers...")

    httpdist = resource.Resource()
    httpdist.putChild('', WebRoot())
    httpdist.putChild('robots.txt',
                      static.File(os.path.join(TEMPLATE_DIR, 'robots.txt')))
    httpdist.putChild('keys',
                      static.File(os.path.join(TEMPLATE_DIR, 'bridgedb.asc')))
    httpdist.putChild('assets',
                      static.File(os.path.join(TEMPLATE_DIR, 'assets/')))
    httpdist.putChild('options', WebResourceOptions())
    httpdist.putChild('howto', WebResourceHowto())

    if cfg.RECAPTCHA_ENABLED:
        publicKey = cfg.RECAPTCHA_PUB_KEY
        secretKey = cfg.RECAPTCHA_SEC_KEY
        captcha = partial(ReCaptchaProtectedResource,
                          remoteIP=cfg.RECAPTCHA_REMOTEIP)
    elif cfg.GIMP_CAPTCHA_ENABLED:
        # Get the master HMAC secret key for CAPTCHA challenges, and then
        # create a new HMAC key from it for use on the server.
        captchaKey = crypto.getKey(cfg.GIMP_CAPTCHA_HMAC_KEYFILE)
        hmacKey = crypto.getHMAC(captchaKey, "Captcha-Key")
        # Load or create our encryption keys:
        secretKey, publicKey = crypto.getRSAKey(cfg.GIMP_CAPTCHA_RSA_KEYFILE)
        captcha = partial(GimpCaptchaProtectedResource,
                          hmacKey=hmacKey,
                          captchaDir=cfg.GIMP_CAPTCHA_DIR)

    if cfg.HTTPS_ROTATION_PERIOD:
        count, period = cfg.HTTPS_ROTATION_PERIOD.split()
        sched = ScheduledInterval(count, period)
    else:
        sched = Unscheduled()

    bridges = WebResourceBridges(dist, sched, numBridges, fwdHeaders,
                                 includeFingerprints=fprInclude)

    if captcha:
        # Protect the 'bridges' page with a CAPTCHA, if configured to do so:
        protected = captcha(publicKey=publicKey, secretKey=secretKey,
                            useForwardedHeader=fwdHeaders,
                            protectedResource=bridges)
        httpdist.putChild('bridges', protected)
        logging.info("Protecting resources with %s." % captcha.func.__name__)
    else:
        httpdist.putChild('bridges', bridges)

    site = server.Site(httpdist)
    site.displayTracebacks = False

    if cfg.HTTP_UNENCRYPTED_PORT:
        ip = cfg.HTTP_UNENCRYPTED_BIND_IP or ""
        port = cfg.HTTP_UNENCRYPTED_PORT or 80
        try:
            reactor.listenTCP(port, site, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTP server on %s:%d" % (str(ip), int(port)))

    if cfg.HTTPS_PORT:
        ip = cfg.HTTPS_BIND_IP or ""
        port = cfg.HTTPS_PORT or 443
        try:
            from twisted.internet.ssl import DefaultOpenSSLContextFactory
            factory = DefaultOpenSSLContextFactory(cfg.HTTPS_KEY_FILE,
                                                   cfg.HTTPS_CERT_FILE)
            reactor.listenSSL(port, site, factory, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTPS server on %s:%d" % (str(ip), int(port)))

    return site
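# HTTPS_ROTATION_PERIOD is split into a count and a unit before being handed
# to ScheduledInterval above, so the config value is expected to be a single
# string such as "3 hours".  The concrete value here is an illustrative
# assumption, not taken from a real bridgedb.conf; this is only a minimal
# sketch of that parsing step.
def parseRotationPeriod_sketch(rotationPeriod):
    """Return (count, period) for a value like ``"3 hours"``, or None if unset."""
    if not rotationPeriod:
        return None
    count, period = rotationPeriod.split()
    return count, period

assert parseRotationPeriod_sketch("3 hours") == ("3", "hours")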
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options.subCommand is not None:
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    # Let our pluggable transport class know what transports are resistant to
    # active probing.  We need to know because we shouldn't hand out a
    # probing-vulnerable transport on a bridge that supports a
    # probing-resistant transport.  See
    # <https://bugs.torproject.org/28655> for details.
    from bridgedb.bridges import PluggableTransport
    PluggableTransport.probing_resistant_transports = config.PROBING_RESISTANT_TRANSPORTS

    from bridgedb import persistent
    state = persistent.State(config=config)

    from bridgedb.distributors.email.server import addServer as addSMTPServer
    from bridgedb.distributors.https.server import addWebServer
    from bridgedb.distributors.moat.server import addMoatServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None
    moatDistributor = None

    # Save our state
    state.key = key
    state.save()

    def reload(inThread=True):  # pragma: no cover
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables ``cfg`` and ``hashring`` are taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, proxies, removeStale=True)
        metrics.setProxies(proxies)

        state.BLACKLISTED_TOR_VERSIONS = parseVersionsList(
            state.BLACKLISTED_TOR_VERSIONS)

        logging.info("Reloading blacklisted request headers...")
        antibot.loadBlacklistedRequestHeaders(
            config.BLACKLISTED_REQUEST_HEADERS_FILE)
        logging.info("Reloading decoy bridges...")
        antibot.loadDecoyBridges(config.DECOY_BRIDGES_FILE)

        (hashring, emailDistributorTmp, ipDistributorTmp,
         moatDistributorTmp) = createBridgeRings(cfg, proxies, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        logging.info("Reparsing bridge descriptors...")
        load(state, hashring, clear=False)
        logging.info("Bridges loaded: %d" % len(hashring))

        loadBlockedBridges(hashring)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No HTTP(S) distributor created!")

        if moatDistributorTmp is not None:
            moatDistributorTmp.prepopulateRings()
        else:
            logging.warn("No Moat distributor created!")

        metrix = metrics.InternalMetrics()
        logging.info("Logging bridge ring metrics for %d rings."
                     % len(hashring.ringsByName))
        for ringName, ring in hashring.ringsByName.items():
            # Ring is of type FilteredBridgeSplitter or UnallocatedHolder.
            # FilteredBridgeSplitter splits bridges into subhashrings based on
            # filters.
            if hasattr(ring, "filterRings"):
                for (ringname, (filterFn, subring)) in ring.filterRings.items():
                    subRingName = "-".join(ring.extractFilterNames(ringname))
                    metrix.recordBridgesInHashring(ringName,
                                                   subRingName,
                                                   len(subring))
            elif hasattr(ring, "fingerprints"):
                metrix.recordBridgesInHashring(ringName, "unallocated",
                                               len(ring.fingerprints))

        # Dump bridge pool assignments to disk.
        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if moatDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       moatDistributor, moatDistributorTmp)
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)

    if reactor:  # pragma: no cover
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor, moatDistributor = reload(False)

        # Configure all servers:
        if config.MOAT_DIST and config.MOAT_SHARE:
            addMoatServer(config, moatDistributor)
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        metrics.setSupportedTransports(config.SUPPORTED_TRANSPORTS)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits,
                proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        measurementInterval, _ = config.TASKS['EXPORT_METRICS']
        tasks['EXPORT_METRICS'] = task.LoopingCall(
            writeMetrics, state.METRICS_FILE, measurementInterval)

        # Schedule all configured repeating tasks:
        for name, value in config.TASKS.items():
            seconds, startNow = value
            if seconds:
                try:
                    # Set now to False to get the servers up and running when
                    # first started, rather than spend a bunch of time in
                    # scheduled tasks.
                    tasks[name].start(abs(seconds), now=startNow)
                except KeyError:
                    logging.info("Task %s is disabled and will not run." % name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds."
                                 % (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:  # pragma: no cover
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
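# In this newest run(), the scheduling loop unpacks each config.TASKS value as
# ``seconds, startNow = value``, so the entries it handles are
# (interval-in-seconds, start-immediately) pairs, whereas the older run()
# variants further down unpack a bare number of seconds.  The concrete values
# in this sketch are illustrative assumptions, not taken from a real
# bridgedb.conf:
TASKS_EXAMPLE = {
    'GET_TOR_EXIT_LIST': (3 * 60 * 60, False),            # assumed interval
    'DELETE_UNPARSEABLE_DESCRIPTORS': (24 * 60 * 60, False),
    'EXPORT_METRICS': (24 * 60 * 60, True),
}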
def test_getKey_nokey(self):
    """Test retrieving the secret_key from an empty file."""
    filename = os.path.join(os.getcwd(), 'sekrit')
    key = crypto.getKey(filename)
    self.failUnlessIsInstance(key, basestring,
                              "key isn't a string! type=%r" % type(key))
def test_getKey_tmpfile(self):
    """Test retrieving the secret_key from a new tmpfile."""
    filename = self.mktemp()
    key = crypto.getKey(filename)
    self.failUnlessIsInstance(key, basestring,
                              "key isn't a string! type=%r" % type(key))
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    from bridgedb import persistent
    state = persistent.State(config=config)

    from bridgedb.email.server import addServer as addSMTPServer
    from bridgedb.https.server import addWebServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None

    # Save our state
    state.proxies = proxies
    state.key = key
    state.save()

    def reload(inThread=True):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``hashring``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.Bridges.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A
            :class:`~bridgedb.https.distributor.HTTPSDistributor`.
        :ivar emailDistributor: A
            :class:`~bridgedb.email.distributor.EmailDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, state.proxies,
                                      removeStale=True)

        logging.info("Reparsing bridge descriptors...")
        (hashring,
         emailDistributorTmp,
         ipDistributorTmp) = createBridgeRings(cfg, state.proxies, key)
        logging.info("Bridges loaded: %d" % len(hashring))

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, hashring, clear=False)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (emailDistributorTmp.name,
                            len(emailDistributorTmp.hashring)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (ipDistributorTmp.name,
                            len(ipDistributorTmp.hashring)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributorTmp.hashring.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributorTmp.hashring.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d"
                         % (ipDistributorTmp.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'"
                          % state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n"
                     % time.strftime("%Y-%m-%d %H:%M:%S"))
            hashring.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'"
                         % state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    if reactor:
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor = reload(False)

        # Configure all servers:
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits,
                state.proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        # Schedule all configured repeating tasks:
        for name, seconds in config.TASKS.items():
            if seconds:
                try:
                    tasks[name].start(abs(seconds))
                except KeyError:
                    logging.info("Task %s is disabled and will not run." % name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds."
                                 % (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
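# The reload() closure above is stored in the module-global _reloadFn and
# wired to SIGHUP via signal.signal(signal.SIGHUP, _handleSIGHUP).  The
# handler itself is not included in this excerpt; a plausible minimal sketch
# is below.  Its body (deferring the reload to a reactor thread) is an
# assumption, and it relies on the module-level ``reactor`` and ``_reloadFn``
# names used by the surrounding code.
def _handleSIGHUP_sketch(*args):
    """Called when we receive a SIGHUP; run the saved reload function."""
    reactor.callInThread(_reloadFn)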
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options.subCommand is not None:
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    from bridgedb import persistent
    state = persistent.State(config=config)

    from bridgedb.distributors.email.server import addServer as addSMTPServer
    from bridgedb.distributors.https.server import addWebServer
    from bridgedb.distributors.moat.server import addMoatServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None
    moatDistributor = None

    # Save our state
    state.proxies = proxies
    state.key = key
    state.save()

    def reload(inThread=True):  # pragma: no cover
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``hashring``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.Bridges.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A
            :class:`~bridgedb.distributors.https.distributor.HTTPSDistributor`.
        :ivar emailDistributor: A
            :class:`~bridgedb.distributors.email.distributor.EmailDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, state.proxies,
                                      removeStale=True)

        logging.info("Reparsing bridge descriptors...")
        (hashring, emailDistributorTmp, ipDistributorTmp,
         moatDistributorTmp) = createBridgeRings(cfg, state.proxies, key)
        logging.info("Bridges loaded: %d" % len(hashring))

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, hashring, clear=False)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No HTTP(S) distributor created!")

        if moatDistributorTmp is not None:
            moatDistributorTmp.prepopulateRings()
        else:
            logging.warn("No Moat distributor created!")

        # Dump bridge pool assignments to disk.
        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if moatDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       moatDistributor, moatDistributorTmp)
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)

    if reactor:  # pragma: no cover
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor, moatDistributor = reload(False)

        # Configure all servers:
        if config.MOAT_DIST and config.MOAT_SHARE:
            addMoatServer(config, moatDistributor)
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits,
                state.proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        # Schedule all configured repeating tasks:
        for name, seconds in config.TASKS.items():
            if seconds:
                try:
                    # Set now to False to get the servers up and running when
                    # first started, rather than spend a bunch of time in
                    # scheduled tasks.
                    tasks[name].start(abs(seconds), now=False)
                except KeyError:
                    logging.info("Task %s is disabled and will not run." % name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds."
                                 % (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:  # pragma: no cover
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
def addWebServer(cfg, dist, sched):
    """Set up a web server for HTTP(S)-based bridge distribution.

    :type cfg: :class:`bridgedb.persistent.Conf`
    :param cfg: A configuration object from :mod:`bridgedb.Main`. Currently,
        we use these options::
            HTTP_UNENCRYPTED_PORT
            HTTP_UNENCRYPTED_BIND_IP
            HTTP_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_N_BRIDGES_PER_ANSWER
            HTTPS_INCLUDE_FINGERPRINTS
            HTTPS_KEY_FILE
            HTTPS_CERT_FILE
            HTTPS_PORT
            HTTPS_BIND_IP
            HTTPS_USE_IP_FROM_FORWARDED_HEADER
            RECAPTCHA_ENABLED
            RECAPTCHA_PUB_KEY
            RECAPTCHA_SEC_KEY
            RECAPTCHA_REMOTEIP
            GIMP_CAPTCHA_ENABLED
            GIMP_CAPTCHA_DIR
            GIMP_CAPTCHA_HMAC_KEYFILE
            GIMP_CAPTCHA_RSA_KEYFILE
    :type dist: :class:`bridgedb.Dist.IPBasedDistributor`
    :param dist: A bridge distributor.
    :type sched: :class:`bridgedb.schedule.ScheduledInterval`
    :param sched: The scheduled interval at which bridge selection, which are
        ultimately displayed on the :class:`WebResourceBridges` page, will be
        shifted.
    :raises SystemExit: if the servers cannot be started.
    :rtype: :api:`twisted.web.server.Site`
    :returns: A webserver.
    """
    captcha = None
    fwdHeaders = cfg.HTTP_USE_IP_FROM_FORWARDED_HEADER
    numBridges = cfg.HTTPS_N_BRIDGES_PER_ANSWER
    fprInclude = cfg.HTTPS_INCLUDE_FINGERPRINTS

    logging.info("Starting web servers...")

    httpdist = resource.Resource()
    httpdist.putChild('', WebRoot())
    httpdist.putChild('robots.txt',
                      static.File(os.path.join(TEMPLATE_DIR, 'robots.txt')))
    httpdist.putChild('keys',
                      static.File(os.path.join(TEMPLATE_DIR, 'bridgedb.asc')))
    httpdist.putChild('assets',
                      static.File(os.path.join(TEMPLATE_DIR, 'assets/')))
    httpdist.putChild('options', WebResourceOptions())
    httpdist.putChild('howto', WebResourceHowto())

    if cfg.RECAPTCHA_ENABLED:
        publicKey = cfg.RECAPTCHA_PUB_KEY
        secretKey = cfg.RECAPTCHA_SEC_KEY
        captcha = partial(ReCaptchaProtectedResource,
                          remoteIP=cfg.RECAPTCHA_REMOTEIP)
    elif cfg.GIMP_CAPTCHA_ENABLED:
        # Get the master HMAC secret key for CAPTCHA challenges, and then
        # create a new HMAC key from it for use on the server.
        captchaKey = crypto.getKey(cfg.GIMP_CAPTCHA_HMAC_KEYFILE)
        hmacKey = crypto.getHMAC(captchaKey, "Captcha-Key")
        # Load or create our encryption keys:
        secretKey, publicKey = crypto.getRSAKey(cfg.GIMP_CAPTCHA_RSA_KEYFILE)
        captcha = partial(GimpCaptchaProtectedResource,
                          hmacKey=hmacKey,
                          captchaDir=cfg.GIMP_CAPTCHA_DIR)

    bridges = WebResourceBridges(dist, sched, numBridges, fwdHeaders,
                                 includeFingerprints=fprInclude)

    if captcha:
        # Protect the 'bridges' page with a CAPTCHA, if configured to do so:
        protected = captcha(publicKey=publicKey, secretKey=secretKey,
                            useForwardedHeader=fwdHeaders,
                            protectedResource=bridges)
        httpdist.putChild('bridges', protected)
        logging.info("Protecting resources with %s." % captcha.func.__name__)
    else:
        httpdist.putChild('bridges', bridges)

    site = server.Site(httpdist)
    site.displayTracebacks = False

    if cfg.HTTP_UNENCRYPTED_PORT:
        ip = cfg.HTTP_UNENCRYPTED_BIND_IP or ""
        port = cfg.HTTP_UNENCRYPTED_PORT or 80
        try:
            reactor.listenTCP(port, site, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTP server on %s:%d" % (str(ip), int(port)))

    if cfg.HTTPS_PORT:
        ip = cfg.HTTPS_BIND_IP or ""
        port = cfg.HTTPS_PORT or 443
        try:
            from twisted.internet.ssl import DefaultOpenSSLContextFactory
            factory = DefaultOpenSSLContextFactory(cfg.HTTPS_KEY_FILE,
                                                   cfg.HTTPS_CERT_FILE)
            reactor.listenSSL(port, site, factory, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTPS server on %s:%d" % (str(ip), int(port)))

    return site
def addMoatServer(config, distributor):
    """Set up a web server for moat bridge distribution.

    :type config: :class:`bridgedb.persistent.Conf`
    :param config: A configuration object from :mod:`bridgedb.main`.
        Currently, we use these options::
            GIMP_CAPTCHA_DIR
            SERVER_PUBLIC_FQDN
            SUPPORTED_TRANSPORTS
            MOAT_DIST
            MOAT_DIST_VIA_MEEK_ONLY
            MOAT_TLS_CERT_FILE
            MOAT_TLS_KEY_FILE
            MOAT_SERVER_PUBLIC_ROOT
            MOAT_HTTPS_IP
            MOAT_HTTPS_PORT
            MOAT_HTTP_IP
            MOAT_HTTP_PORT
            MOAT_BRIDGES_PER_ANSWER
            MOAT_TRANSPORT_PREFERENCE_LIST
            MOAT_USE_IP_FROM_FORWARDED_HEADER
            MOAT_SKIP_LOOPBACK_ADDRESSES
            MOAT_ROTATION_PERIOD
            MOAT_GIMP_CAPTCHA_HMAC_KEYFILE
            MOAT_GIMP_CAPTCHA_RSA_KEYFILE
    :type distributor: :class:`bridgedb.distributors.moat.distributor.MoatDistributor`
    :param distributor: A bridge distributor.
    :raises SystemExit: if the servers cannot be started.
    :rtype: :api:`twisted.web.server.Site`
    :returns: A webserver.
    """
    captcha = None
    fwdHeaders = config.MOAT_USE_IP_FROM_FORWARDED_HEADER
    numBridges = config.MOAT_BRIDGES_PER_ANSWER
    skipLoopback = config.MOAT_SKIP_LOOPBACK_ADDRESSES

    logging.info("Starting moat servers...")

    setFQDN(config.SERVER_PUBLIC_FQDN)
    setRoot(config.MOAT_SERVER_PUBLIC_ROOT)
    setSupportedTransports(config.SUPPORTED_TRANSPORTS)
    setPreferredTransports(config.MOAT_TRANSPORT_PREFERENCE_LIST)

    # Get the master HMAC secret key for CAPTCHA challenges, and then
    # create a new HMAC key from it for use on the server.
    captchaKey = crypto.getKey(config.MOAT_GIMP_CAPTCHA_HMAC_KEYFILE)
    hmacKey = crypto.getHMAC(captchaKey, "Moat-Captcha-Key")
    # Load or create our encryption keys:
    secretKey, publicKey = crypto.getRSAKey(config.MOAT_GIMP_CAPTCHA_RSA_KEYFILE)

    sched = Unscheduled()
    if config.MOAT_ROTATION_PERIOD:
        count, period = config.MOAT_ROTATION_PERIOD.split()
        sched = ScheduledInterval(count, period)

    sitePublicDir = getRoot()

    meek = CustomErrorHandlingResource()
    moat = CustomErrorHandlingResource()
    fetch = CaptchaFetchResource(hmacKey, publicKey, secretKey,
                                 config.GIMP_CAPTCHA_DIR,
                                 fwdHeaders, skipLoopback)
    check = CaptchaCheckResource(distributor, sched, numBridges,
                                 hmacKey, publicKey, secretKey,
                                 fwdHeaders, skipLoopback)

    moat.putChild("fetch", fetch)
    moat.putChild("check", check)
    meek.putChild("moat", moat)

    root = CustomErrorHandlingResource()
    root.putChild("meek", meek)
    root.putChild("moat", moat)

    site = Site(root)
    site.displayTracebacks = False

    if config.MOAT_HTTP_PORT:  # pragma: no cover
        ip = config.MOAT_HTTP_IP or ""
        port = config.MOAT_HTTP_PORT or 80
        try:
            reactor.listenTCP(port, site, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started Moat HTTP server on %s:%d"
                     % (str(ip), int(port)))

    if config.MOAT_HTTPS_PORT:  # pragma: no cover
        ip = config.MOAT_HTTPS_IP or ""
        port = config.MOAT_HTTPS_PORT or 443
        try:
            from twisted.internet.ssl import DefaultOpenSSLContextFactory
            factory = DefaultOpenSSLContextFactory(config.MOAT_TLS_KEY_FILE,
                                                   config.MOAT_TLS_CERT_FILE)
            reactor.listenSSL(port, site, factory, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started Moat TLS server on %s:%d"
                     % (str(ip), int(port)))

    return site
def addWebServer(config, distributor):
    """Set up a web server for HTTP(S)-based bridge distribution.

    :type config: :class:`bridgedb.persistent.Conf`
    :param config: A configuration object from :mod:`bridgedb.main`.
        Currently, we use these options::
            HTTP_UNENCRYPTED_PORT
            HTTP_UNENCRYPTED_BIND_IP
            HTTP_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_N_BRIDGES_PER_ANSWER
            HTTPS_INCLUDE_FINGERPRINTS
            HTTPS_KEY_FILE
            HTTPS_CERT_FILE
            HTTPS_PORT
            HTTPS_BIND_IP
            HTTPS_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_ROTATION_PERIOD
            RECAPTCHA_ENABLED
            RECAPTCHA_PUB_KEY
            RECAPTCHA_SEC_KEY
            RECAPTCHA_REMOTEIP
            GIMP_CAPTCHA_ENABLED
            GIMP_CAPTCHA_DIR
            GIMP_CAPTCHA_HMAC_KEYFILE
            GIMP_CAPTCHA_RSA_KEYFILE
            SERVER_PUBLIC_FQDN
            CSP_ENABLED
            CSP_REPORT_ONLY
            CSP_INCLUDE_SELF
    :type distributor: :class:`bridgedb.distributors.https.distributor.HTTPSDistributor`
    :param distributor: A bridge distributor.
    :raises SystemExit: if the servers cannot be started.
    :rtype: :api:`twisted.web.server.Site`
    :returns: A webserver.
    """
    captcha = None
    fwdHeaders = config.HTTP_USE_IP_FROM_FORWARDED_HEADER
    numBridges = config.HTTPS_N_BRIDGES_PER_ANSWER
    fprInclude = config.HTTPS_INCLUDE_FINGERPRINTS

    logging.info("Starting web servers...")

    setFQDN(config.SERVER_PUBLIC_FQDN)

    index = IndexResource()
    options = OptionsResource()
    howto = HowtoResource()
    robots = static.File(os.path.join(TEMPLATE_DIR, 'robots.txt'))
    assets = static.File(os.path.join(TEMPLATE_DIR, 'assets/'))
    keys = static.Data(bytes(strings.BRIDGEDB_OPENPGP_KEY), 'text/plain')
    csp = CSPResource(enabled=config.CSP_ENABLED,
                      includeSelf=config.CSP_INCLUDE_SELF,
                      reportViolations=config.CSP_REPORT_ONLY,
                      useForwardedHeader=fwdHeaders)

    root = CustomErrorHandlingResource()
    root.putChild('', index)
    root.putChild('robots.txt', robots)
    root.putChild('keys', keys)
    root.putChild('assets', assets)
    root.putChild('options', options)
    root.putChild('howto', howto)
    root.putChild('maintenance', maintenance)
    root.putChild('error', resource500)
    root.putChild(CSPResource.reportURI, csp)

    if config.RECAPTCHA_ENABLED:
        publicKey = config.RECAPTCHA_PUB_KEY
        secretKey = config.RECAPTCHA_SEC_KEY
        captcha = partial(ReCaptchaProtectedResource,
                          remoteIP=config.RECAPTCHA_REMOTEIP)
    elif config.GIMP_CAPTCHA_ENABLED:
        # Get the master HMAC secret key for CAPTCHA challenges, and then
        # create a new HMAC key from it for use on the server.
        captchaKey = crypto.getKey(config.GIMP_CAPTCHA_HMAC_KEYFILE)
        hmacKey = crypto.getHMAC(captchaKey, "Captcha-Key")
        # Load or create our encryption keys:
        secretKey, publicKey = crypto.getRSAKey(config.GIMP_CAPTCHA_RSA_KEYFILE)
        captcha = partial(GimpCaptchaProtectedResource,
                          hmacKey=hmacKey,
                          captchaDir=config.GIMP_CAPTCHA_DIR)

    if config.HTTPS_ROTATION_PERIOD:
        count, period = config.HTTPS_ROTATION_PERIOD.split()
        sched = ScheduledInterval(count, period)
    else:
        sched = Unscheduled()

    bridges = BridgesResource(distributor, sched, numBridges, fwdHeaders,
                              includeFingerprints=fprInclude)

    if captcha:
        # Protect the 'bridges' page with a CAPTCHA, if configured to do so:
        protected = captcha(publicKey=publicKey, secretKey=secretKey,
                            useForwardedHeader=fwdHeaders,
                            protectedResource=bridges)
        root.putChild('bridges', protected)
        logging.info("Protecting resources with %s." % captcha.func.__name__)
    else:
        root.putChild('bridges', bridges)

    site = Site(root)
    site.displayTracebacks = False

    if config.HTTP_UNENCRYPTED_PORT:  # pragma: no cover
        ip = config.HTTP_UNENCRYPTED_BIND_IP or ""
        port = config.HTTP_UNENCRYPTED_PORT or 80
        try:
            reactor.listenTCP(port, site, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTP server on %s:%d" % (str(ip), int(port)))

    if config.HTTPS_PORT:  # pragma: no cover
        ip = config.HTTPS_BIND_IP or ""
        port = config.HTTPS_PORT or 443
        try:
            from twisted.internet.ssl import DefaultOpenSSLContextFactory
            factory = DefaultOpenSSLContextFactory(config.HTTPS_KEY_FILE,
                                                   config.HTTPS_CERT_FILE)
            reactor.listenSSL(port, site, factory, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTPS server on %s:%d" % (str(ip), int(port)))

    return site
def startup(options):
    """Parse bridges,

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    from bridgedb import persistent
    state = persistent.State(config=config)

    from bridgedb.email.server import addServer as addSMTPServer
    from bridgedb import HTTPServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)

    # Get a proxy list.
    proxyList = ProxyCategory()
    proxyList.replaceProxyList(loadProxyList(config))

    emailDistributor = ipDistributor = None

    # Save our state
    state.proxyList = proxyList
    state.key = key
    state.save()

    def reload(inThread=True):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``splitter``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
        :ivar splitter: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`ProxyCategory`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
        :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.debug("Saving state again before reparsing descriptors...")
        state.save()
        logging.info("Reparsing bridge descriptors...")

        (splitter,
         emailDistributorTmp,
         ipDistributorTmp) = createBridgeRings(cfg, proxyList, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        db = bridgedb.Storage.openOrConvertDatabase(cfg.DB_FILE + ".sqlite",
                                                    cfg.DB_FILE)
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, splitter, clear=False)

        state = persistent.load()
        logging.info("Bridges loaded: %d" % len(splitter))

        logging.debug("Replacing the list of open proxies...")
        state.proxyList.replaceProxyList(loadProxyList(cfg))

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (emailDistributorTmp.name,
                            len(emailDistributorTmp.splitter)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (ipDistributorTmp.name,
                            len(ipDistributorTmp.splitter)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributorTmp.splitter.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributorTmp.splitter.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d"
                         % (ipDistributorTmp.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'"
                          % state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n"
                     % time.strftime("%Y-%m-%d %H:%M:%S"))
            splitter.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'"
                         % state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    # And actually load it to start parsing. Get back our distributors.
    emailDistributor, ipDistributor = reload(False)

    # Configure all servers:
    if config.HTTPS_DIST and config.HTTPS_SHARE:
        #webSchedule = schedule.ScheduledInterval("day", 2)
        webSchedule = schedule.Unscheduled()
        HTTPServer.addWebServer(config, ipDistributor, webSchedule)
    if config.EMAIL_DIST and config.EMAIL_SHARE:
        #emailSchedule = schedule.ScheduledInterval("day", 1)
        emailSchedule = schedule.Unscheduled()
        addSMTPServer(config, emailDistributor, emailSchedule)

    # Actually run the servers.
    try:
        logging.info("Starting reactors.")
        reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
def addMoatServer(config, distributor):
    """Set up a web server for moat bridge distribution.

    :type config: :class:`bridgedb.persistent.Conf`
    :param config: A configuration object from :mod:`bridgedb.main`.
        Currently, we use these options::
            GIMP_CAPTCHA_DIR
            SERVER_PUBLIC_FQDN
            SUPPORTED_TRANSPORTS
            MOAT_DIST
            MOAT_DIST_VIA_MEEK_ONLY
            MOAT_TLS_CERT_FILE
            MOAT_TLS_KEY_FILE
            MOAT_SERVER_PUBLIC_ROOT
            MOAT_HTTPS_IP
            MOAT_HTTPS_PORT
            MOAT_HTTP_IP
            MOAT_HTTP_PORT
            MOAT_BRIDGES_PER_ANSWER
            MOAT_TRANSPORT_PREFERENCE_LIST
            MOAT_USE_IP_FROM_FORWARDED_HEADER
            MOAT_SKIP_LOOPBACK_ADDRESSES
            MOAT_ROTATION_PERIOD
            MOAT_GIMP_CAPTCHA_HMAC_KEYFILE
            MOAT_GIMP_CAPTCHA_RSA_KEYFILE
    :type distributor: :class:`bridgedb.distributors.moat.distributor.MoatDistributor`
    :param distributor: A bridge distributor.
    :raises SystemExit: if the servers cannot be started.
    :rtype: :api:`twisted.web.server.Site`
    :returns: A webserver.
    """
    captcha = None
    fwdHeaders = config.MOAT_USE_IP_FROM_FORWARDED_HEADER
    numBridges = config.MOAT_BRIDGES_PER_ANSWER
    skipLoopback = config.MOAT_SKIP_LOOPBACK_ADDRESSES

    logging.info("Starting moat servers...")

    setFQDN(config.SERVER_PUBLIC_FQDN)
    setRoot(config.MOAT_SERVER_PUBLIC_ROOT)
    setSupportedTransports(config.SUPPORTED_TRANSPORTS)
    setPreferredTransports(config.MOAT_TRANSPORT_PREFERENCE_LIST)

    # Get the master HMAC secret key for CAPTCHA challenges, and then
    # create a new HMAC key from it for use on the server.
    captchaKey = crypto.getKey(config.MOAT_GIMP_CAPTCHA_HMAC_KEYFILE)
    hmacKey = crypto.getHMAC(captchaKey, "Moat-Captcha-Key")
    # Load or create our encryption keys:
    secretKey, publicKey = crypto.getRSAKey(
        config.MOAT_GIMP_CAPTCHA_RSA_KEYFILE)

    sched = Unscheduled()
    if config.MOAT_ROTATION_PERIOD:
        count, period = config.MOAT_ROTATION_PERIOD.split()
        sched = ScheduledInterval(count, period)

    sitePublicDir = getRoot()

    meek = CustomErrorHandlingResource()
    moat = CustomErrorHandlingResource()
    fetch = CaptchaFetchResource(hmacKey, publicKey, secretKey,
                                 config.GIMP_CAPTCHA_DIR,
                                 fwdHeaders, skipLoopback)
    check = CaptchaCheckResource(distributor, sched, numBridges,
                                 hmacKey, publicKey, secretKey,
                                 fwdHeaders, skipLoopback)

    moat.putChild(b"fetch", fetch)
    moat.putChild(b"check", check)
    meek.putChild(b"moat", moat)

    root = CustomErrorHandlingResource()
    root.putChild(b"meek", meek)
    root.putChild(b"moat", moat)

    site = Site(root)
    site.displayTracebacks = False

    if config.MOAT_HTTP_PORT:  # pragma: no cover
        ip = config.MOAT_HTTP_IP or ""
        port = config.MOAT_HTTP_PORT or 80
        try:
            reactor.listenTCP(port, site, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started Moat HTTP server on %s:%d"
                     % (str(ip), int(port)))

    if config.MOAT_HTTPS_PORT:  # pragma: no cover
        ip = config.MOAT_HTTPS_IP or ""
        port = config.MOAT_HTTPS_PORT or 443
        try:
            from twisted.internet.ssl import DefaultOpenSSLContextFactory
            factory = DefaultOpenSSLContextFactory(config.MOAT_TLS_KEY_FILE,
                                                   config.MOAT_TLS_CERT_FILE)
            reactor.listenSSL(port, site, factory, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started Moat TLS server on %s:%d"
                     % (str(ip), int(port)))

    return site
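# Both addMoatServer() variants above hang a CaptchaFetchResource and a
# CaptchaCheckResource under a "moat" resource that is reachable directly and
# via a "meek" reflector path, so the served endpoints are /moat/fetch,
# /moat/check, /meek/moat/fetch, and /meek/moat/check.  The stripped-down
# sketch below rebuilds only that resource tree with placeholder static.Data
# resources standing in for the real CAPTCHA resources; it is an illustration
# of the URL layout, not BridgeDB's implementation.
from twisted.web import resource, server, static

def buildMoatTree_sketch():
    fetch = static.Data(b"fetch placeholder", "text/plain")
    check = static.Data(b"check placeholder", "text/plain")
    moat = resource.Resource()
    moat.putChild(b"fetch", fetch)
    moat.putChild(b"check", check)
    meek = resource.Resource()
    meek.putChild(b"moat", moat)
    root = resource.Resource()
    root.putChild(b"meek", meek)
    root.putChild(b"moat", moat)
    return server.Site(root)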
def addWebServer(config, distributor):
    """Set up a web server for HTTP(S)-based bridge distribution.

    :type config: :class:`bridgedb.persistent.Conf`
    :param config: A configuration object from :mod:`bridgedb.Main`. Currently,
        we use these options::
            HTTP_UNENCRYPTED_PORT
            HTTP_UNENCRYPTED_BIND_IP
            HTTP_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_N_BRIDGES_PER_ANSWER
            HTTPS_INCLUDE_FINGERPRINTS
            HTTPS_KEY_FILE
            HTTPS_CERT_FILE
            HTTPS_PORT
            HTTPS_BIND_IP
            HTTPS_USE_IP_FROM_FORWARDED_HEADER
            HTTPS_ROTATION_PERIOD
            RECAPTCHA_ENABLED
            RECAPTCHA_PUB_KEY
            RECAPTCHA_SEC_KEY
            RECAPTCHA_REMOTEIP
            GIMP_CAPTCHA_ENABLED
            GIMP_CAPTCHA_DIR
            GIMP_CAPTCHA_HMAC_KEYFILE
            GIMP_CAPTCHA_RSA_KEYFILE
            SERVER_PUBLIC_FQDN
            CSP_ENABLED
            CSP_REPORT_ONLY
            CSP_INCLUDE_SELF
    :type distributor: :class:`bridgedb.https.distributor.HTTPSDistributor`
    :param distributor: A bridge distributor.
    :raises SystemExit: if the servers cannot be started.
    :rtype: :api:`twisted.web.server.Site`
    :returns: A webserver.
    """
    captcha = None
    fwdHeaders = config.HTTP_USE_IP_FROM_FORWARDED_HEADER
    numBridges = config.HTTPS_N_BRIDGES_PER_ANSWER
    fprInclude = config.HTTPS_INCLUDE_FINGERPRINTS

    logging.info("Starting web servers...")

    setFQDN(config.SERVER_PUBLIC_FQDN)

    index = IndexResource()
    options = OptionsResource()
    howto = HowtoResource()
    robots = static.File(os.path.join(TEMPLATE_DIR, 'robots.txt'))
    assets = static.File(os.path.join(TEMPLATE_DIR, 'assets/'))
    keys = static.Data(bytes(strings.BRIDGEDB_OPENPGP_KEY), 'text/plain')
    csp = CSPResource(enabled=config.CSP_ENABLED,
                      includeSelf=config.CSP_INCLUDE_SELF,
                      reportViolations=config.CSP_REPORT_ONLY,
                      useForwardedHeader=fwdHeaders)

    root = CustomErrorHandlingResource()
    root.putChild('', index)
    root.putChild('robots.txt', robots)
    root.putChild('keys', keys)
    root.putChild('assets', assets)
    root.putChild('options', options)
    root.putChild('howto', howto)
    root.putChild('maintenance', maintenance)
    root.putChild('error', resource500)
    root.putChild(CSPResource.reportURI, csp)

    if config.RECAPTCHA_ENABLED:
        publicKey = config.RECAPTCHA_PUB_KEY
        secretKey = config.RECAPTCHA_SEC_KEY
        captcha = partial(ReCaptchaProtectedResource,
                          remoteIP=config.RECAPTCHA_REMOTEIP)
    elif config.GIMP_CAPTCHA_ENABLED:
        # Get the master HMAC secret key for CAPTCHA challenges, and then
        # create a new HMAC key from it for use on the server.
        captchaKey = crypto.getKey(config.GIMP_CAPTCHA_HMAC_KEYFILE)
        hmacKey = crypto.getHMAC(captchaKey, "Captcha-Key")
        # Load or create our encryption keys:
        secretKey, publicKey = crypto.getRSAKey(config.GIMP_CAPTCHA_RSA_KEYFILE)
        captcha = partial(GimpCaptchaProtectedResource,
                          hmacKey=hmacKey,
                          captchaDir=config.GIMP_CAPTCHA_DIR)

    if config.HTTPS_ROTATION_PERIOD:
        count, period = config.HTTPS_ROTATION_PERIOD.split()
        sched = ScheduledInterval(count, period)
    else:
        sched = Unscheduled()

    bridges = BridgesResource(distributor, sched, numBridges, fwdHeaders,
                              includeFingerprints=fprInclude)
    if captcha:
        # Protect the 'bridges' page with a CAPTCHA, if configured to do so:
        protected = captcha(publicKey=publicKey, secretKey=secretKey,
                            useForwardedHeader=fwdHeaders,
                            protectedResource=bridges)
        root.putChild('bridges', protected)
        logging.info("Protecting resources with %s." % captcha.func.__name__)
    else:
        root.putChild('bridges', bridges)

    site = Site(root)
    site.displayTracebacks = False

    if config.HTTP_UNENCRYPTED_PORT:  # pragma: no cover
        ip = config.HTTP_UNENCRYPTED_BIND_IP or ""
        port = config.HTTP_UNENCRYPTED_PORT or 80
        try:
            reactor.listenTCP(port, site, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTP server on %s:%d" % (str(ip), int(port)))

    if config.HTTPS_PORT:  # pragma: no cover
        ip = config.HTTPS_BIND_IP or ""
        port = config.HTTPS_PORT or 443
        try:
            from twisted.internet.ssl import DefaultOpenSSLContextFactory
            factory = DefaultOpenSSLContextFactory(config.HTTPS_KEY_FILE,
                                                   config.HTTPS_CERT_FILE)
            reactor.listenSSL(port, site, factory, interface=ip)
        except CannotListenError as error:
            raise SystemExit(error)
        logging.info("Started HTTPS server on %s:%d" % (str(ip), int(port)))

    return site
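
# A minimal standalone sketch of the functools.partial pattern used in the
# CAPTCHA selection above: the backend-specific arguments (remoteIP for
# reCAPTCHA; hmacKey and captchaDir for the gimp CAPTCHA) are bound early,
# while the arguments common to both backends (publicKey, secretKey,
# useForwardedHeader, protectedResource) are supplied later by a single call.
# All names and values below are hypothetical stand-ins, not BridgeDB classes.
from functools import partial

def _exampleProtectedFactory(publicKey, secretKey, useForwardedHeader,
                             protectedResource, **backendKwargs):
    # Stand-in for ReCaptchaProtectedResource / GimpCaptchaProtectedResource:
    # it just records which arguments were bound early and which came later.
    return {"publicKey": publicKey, "secretKey": secretKey,
            "useForwardedHeader": useForwardedHeader,
            "protectedResource": protectedResource,
            "backend": backendKwargs}

def _examplePartialUsage():
    # Bound early, as in the RECAPTCHA_ENABLED branch above:
    captchaFactory = partial(_exampleProtectedFactory, remoteIP="203.0.113.1")
    # Filled in later, exactly once, regardless of which backend was chosen:
    return captchaFactory(publicKey="pub", secretKey="sec",
                          useForwardedHeader=True,
                          protectedResource=object())
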
def startup(options):
    """Parse bridges, set up the distributors and servers, and run the reactor.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`configureLogging` is called. Otherwise a
    # default handler that logs to the console will be created by the imported
    # module, and all further calls to :func:`logging.basicConfig` will be
    # ignored.
    configureLogging(config)

    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb import EmailServer
    from bridgedb import HTTPServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)

    # Initialize our DB file.
    db = bridgedb.Storage.Database(config.DB_FILE + ".sqlite", config.DB_FILE)
    # TODO: move setGlobalDB to bridgedb.persistent.State class
    bridgedb.Storage.setGlobalDB(db)

    # Get a proxy list.
    proxyList = ProxyCategory()
    proxyList.replaceProxyList(loadProxyList(config))

    # Create a BridgeSplitter to assign the bridges to the different
    # distributors.
    splitter = Bridges.BridgeSplitter(crypto.getHMAC(key, "Splitter-Key"))
    logging.debug("Created splitter: %r" % splitter)

    # Create ring parameters.
    ringParams = Bridges.BridgeRingParameters(needPorts=config.FORCE_PORTS,
                                              needFlags=config.FORCE_FLAGS)

    emailDistributor = ipDistributor = None

    # As appropriate, create an IP-based distributor.
    if config.HTTPS_DIST and config.HTTPS_SHARE:
        logging.debug("Setting up HTTPS Distributor...")
        categories = []
        if proxyList.ipset:
            logging.debug("Adding proxyList to HTTPS Distributor categories.")
            categories.append(proxyList)
        logging.debug("HTTPS Distributor categories: '%s'" % categories)

        ipDistributor = Dist.IPBasedDistributor(
            Dist.uniformMap,
            config.N_IP_CLUSTERS,
            crypto.getHMAC(key, "HTTPS-IP-Dist-Key"),
            categories,
            answerParameters=ringParams)
        splitter.addRing(ipDistributor, "https", config.HTTPS_SHARE)
        #webSchedule = Time.IntervalSchedule("day", 2)
        webSchedule = Time.NoSchedule()

    # As appropriate, create an email-based distributor.
    if config.EMAIL_DIST and config.EMAIL_SHARE:
        logging.debug("Setting up Email Distributor...")
        emailDistributor = Dist.EmailBasedDistributor(
            crypto.getHMAC(key, "Email-Dist-Key"),
            config.EMAIL_DOMAIN_MAP.copy(),
            config.EMAIL_DOMAIN_RULES.copy(),
            answerParameters=ringParams)
        splitter.addRing(emailDistributor, "email", config.EMAIL_SHARE)
        #emailSchedule = Time.IntervalSchedule("day", 1)
        emailSchedule = Time.NoSchedule()

    # As appropriate, tell the splitter to leave some bridges unallocated.
    if config.RESERVED_SHARE:
        splitter.addRing(Bridges.UnallocatedHolder(),
                         "unallocated",
                         config.RESERVED_SHARE)

    # Add pseudo distributors to splitter
    for pseudoRing in config.FILE_BUCKETS.keys():
        splitter.addPseudoRing(pseudoRing)

    # Save our state
    state.proxyList = proxyList
    state.key = key
    state.save()

    def reload(*args):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``splitter``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved
        to a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
        :ivar splitter: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`ProxyCategory`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
        :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.debug("Saving state again before reparsing descriptors...")
        state.save()
        logging.info("Reparsing bridge descriptors...")
        load(state, splitter, clear=False)
        state = persistent.load()
        logging.info("Bridges loaded: %d" % len(splitter))

        logging.debug("Replacing the list of open proxies...")
        state.proxyList.replaceProxyList(loadProxyList(cfg))

        if emailDistributor is not None:
            emailDistributor.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (emailDistributor.name,
                            len(emailDistributor.splitter)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributor is not None:
            ipDistributor.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (ipDistributor.name,
                            len(ipDistributor.splitter)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributor.splitter.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributor.splitter.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d"
                         % (ipDistributor.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'"
                          % state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n"
                     % time.strftime("%Y-%m-%d %H:%M:%S"))
            splitter.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'"
                         % state.ASSIGNMENTS_FILE)
        state.save()

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    # And actually load it to start parsing.
    reload()

    # Configure all servers:
    if config.HTTPS_DIST and config.HTTPS_SHARE:
        HTTPServer.addWebServer(config, ipDistributor, webSchedule)
    if config.EMAIL_DIST and config.EMAIL_SHARE:
        EmailServer.addSMTPServer(config, emailDistributor, emailSchedule)

    # Actually run the servers.
    try:
        logging.info("Starting reactors.")
        reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        logging.info("Closing databases...")
        db.close()
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()