def setUp(self):
    """Create a :class:`HTTPServer.WebResourceBridges` and protect it with
    a :class:`GimpCaptchaProtectedResource`.
    """
    # Create our cached CAPTCHA directory:
    self.captchaDir = 'captchas'
    if not os.path.isdir(self.captchaDir):
        os.makedirs(self.captchaDir)

    # Set up our resources to fake a minimal HTTP(S) server:
    self.pagename = b'captcha.html'
    self.root = Resource()
    # (None, None) is the (distributor, scheduleInterval):
    self.protectedResource = HTTPServer.WebResourceBridges(None, None)
    self.captchaResource = HTTPServer.GimpCaptchaProtectedResource(
        secretKey='42',
        publicKey='23',
        hmacKey='abcdefghijklmnopqrstuvwxyz012345',
        # Reuse the attribute set above instead of repeating the 'captchas'
        # literal, so the directory created and the directory used can't
        # drift apart:
        captchaDir=self.captchaDir,
        useForwardedHeader=True,
        protectedResource=self.protectedResource)
    self.root.putChild(self.pagename, self.captchaResource)

    # Set up the basic parts of our faked request:
    self.request = DummyRequest([self.pagename])
def setUp(self):
    """Wrap a :class:`HTTPServer.WebResourceBridges` in a
    :class:`HTTPServer.CaptchaProtectedResource` under a fake site root.
    """
    # Neither a real distributor nor a schedule is needed for these tests:
    self.dist = None
    self.sched = None
    self.root = Resource()
    self.pagename = b'bridges.html'
    bridges = HTTPServer.WebResourceBridges(self.dist, self.sched)
    self.protectedResource = bridges
    self.captchaResource = HTTPServer.CaptchaProtectedResource(
        useForwardedHeader=True,
        protectedResource=bridges)
    self.root.putChild(self.pagename, self.captchaResource)
def setUp(self):
    """Create a :class:`HTTPServer.WebResourceOptions` on a fake site root."""
    self.root = Resource()
    self.pagename = b'options.html'
    self.optionsResource = HTTPServer.WebResourceOptions()
    self.root.putChild(self.pagename, self.optionsResource)
def setUp(self):
    """Create a :class:`HTTPServer.WebResourceBridges` and protect it with
    a :class:`ReCaptchaProtectedResource`.
    """
    # Can't take longer than that, right?
    self.timeout = 10.0

    # Set up our resources to fake a minimal HTTP(S) server:
    self.root = Resource()
    self.pagename = b'captcha.html'
    # (None, None) is the (distributor, scheduleInterval):
    protected = HTTPServer.WebResourceBridges(None, None)
    self.protectedResource = protected
    self.captchaResource = HTTPServer.ReCaptchaProtectedResource(
        publicKey='23',
        secretKey='42',
        remoteIP='111.111.111.111',
        useForwardedHeader=True,
        protectedResource=protected)
    self.root.putChild(self.pagename, self.captchaResource)

    # Set up the basic parts of our faked request:
    self.request = DummyRequest([self.pagename])
def test_render_GET_missingTemplate(self):
    """render_GET() with a missing template should raise an error and
    return the result of replaceErrorPage().
    """
    self.request.method = b'GET'
    savedLookup = HTTPServer.lookup
    # Break template lookup, render, and restore the lookup no matter what:
    try:
        HTTPServer.lookup = None
        rendered = self.captchaResource.render_GET(self.request)
        expected = HTTPServer.replaceErrorPage(Exception('kablam'))
        self.assertEqual(rendered, expected)
    finally:
        HTTPServer.lookup = savedLookup
def setUp(self):
    """Set up our resources to fake a minimal HTTP(S) server."""
    self.pagename = b'bridges.html'
    self.root = Resource()
    self.dist = DummyIPBasedDistributor()
    self.sched = ScheduledInterval(1, 'hour')
    self.nBridgesPerRequest = 2
    self.bridgesResource = HTTPServer.WebResourceBridges(
        self.dist, self.sched,
        # Pass the attribute rather than a hardcoded 2, so the resource and
        # the tests that read ``self.nBridgesPerRequest`` always agree:
        N=self.nBridgesPerRequest,
        includeFingerprints=True)
    self.root.putChild(self.pagename, self.bridgesResource)
def test_replaceErrorPage(self):
    """``replaceErrorPage`` should return the expected html."""
    rendered = HTTPServer.replaceErrorPage(Exception("vegan gümmibären"))
    # The generic apology must appear; the raw exception text must not leak:
    self.assertSubstring("Something went wrong", rendered)
    self.assertNotSubstring("vegan gümmibären", rendered)
def startup(cfg):
    """Parse bridges and start the distributor servers.

    Expands paths in the configuration, writes the pidfile, sets up logging,
    builds the bridge splitter and the IP-/email-based distributors, installs
    a SIGHUP handler which reloads everything, and finally runs the Twisted
    reactor until shutdown.

    :type cfg: :class:`Conf`
    :param cfg: The loaded configuration; path values are expanded in place.
    """
    # Expand any ~ characters in paths in the configuration.
    cfg.BRIDGE_FILES = [os.path.expanduser(fn) for fn in cfg.BRIDGE_FILES]
    for key in ("RUN_IN_DIR", "DB_FILE", "DB_LOG_FILE", "MASTER_KEY_FILE",
                "ASSIGNMENTS_FILE", "HTTPS_CERT_FILE", "HTTPS_KEY_FILE",
                "PIDFILE", "LOGFILE", "STATUS_FILE"):
        v = getattr(cfg, key, None)
        if v:
            setattr(cfg, key, os.path.expanduser(v))
    if hasattr(cfg, "PROXY_LIST_FILES"):
        cfg.PROXY_LIST_FILES = [os.path.expanduser(v)
                                for v in cfg.PROXY_LIST_FILES]
    else:
        cfg.PROXY_LIST_FILES = []

    # Write the pidfile.  A context manager guarantees the handle is closed
    # even if the write fails.
    if cfg.PIDFILE:
        with open(cfg.PIDFILE, "w") as f:
            f.write("%s\n" % os.getpid())

    # Set up logging.
    configureLogging(cfg)

    # XXX import Servers after logging is set up.  Otherwise, python will
    # create a default handler that logs to the console and ignore further
    # basicConfig calls.
    from bridgedb import EmailServer
    from bridgedb import HTTPServer

    # Load the master key, or create a new one.
    key = getKey(cfg.MASTER_KEY_FILE)

    # Initialize our DB file.
    db = bridgedb.Storage.Database(cfg.DB_FILE + ".sqlite", cfg.DB_FILE)
    bridgedb.Storage.setGlobalDB(db)

    # Get a proxy list.
    proxyList = ProxyCategory()
    proxyList.replaceProxyList(loadProxyList(cfg))

    # Create a BridgeSplitter to assign the bridges to the different
    # distributors.
    splitter = Bridges.BridgeSplitter(Bridges.get_hmac(key, "Splitter-Key"))

    # Create ring parameters.  Use a getattr() default so a config which
    # omits these keys doesn't raise AttributeError.
    forcePorts = getattr(cfg, "FORCE_PORTS", None) or []
    forceFlags = getattr(cfg, "FORCE_FLAGS", None) or []
    ringParams = Bridges.BridgeRingParameters(needPorts=forcePorts,
                                              needFlags=forceFlags)

    emailDistributor = ipDistributor = None

    # As appropriate, create an IP-based distributor.
    if cfg.HTTPS_DIST and cfg.HTTPS_SHARE:
        categories = []
        if proxyList.ipset:
            categories.append(proxyList)
        ipDistributor = Dist.IPBasedDistributor(
            Dist.uniformMap,
            cfg.N_IP_CLUSTERS,
            Bridges.get_hmac(key, "HTTPS-IP-Dist-Key"),
            categories,
            answerParameters=ringParams)
        splitter.addRing(ipDistributor, "https", cfg.HTTPS_SHARE)
        # webSchedule = Time.IntervalSchedule("day", 2)
        webSchedule = Time.NoSchedule()

    # As appropriate, create an email-based distributor.
    if cfg.EMAIL_DIST and cfg.EMAIL_SHARE:
        for d in cfg.EMAIL_DOMAINS:
            cfg.EMAIL_DOMAIN_MAP[d] = d
        emailDistributor = Dist.EmailBasedDistributor(
            Bridges.get_hmac(key, "Email-Dist-Key"),
            cfg.EMAIL_DOMAIN_MAP.copy(),
            cfg.EMAIL_DOMAIN_RULES.copy(),
            answerParameters=ringParams)
        splitter.addRing(emailDistributor, "email", cfg.EMAIL_SHARE)
        # emailSchedule = Time.IntervalSchedule("day", 1)
        emailSchedule = Time.NoSchedule()

    # As appropriate, tell the splitter to leave some bridges unallocated.
    if cfg.RESERVED_SHARE:
        splitter.addRing(Bridges.UnallocatedHolder(),
                         "unallocated",
                         cfg.RESERVED_SHARE)

    # Add pseudo distributors to splitter.
    for p in cfg.FILE_BUCKETS.keys():
        splitter.addPseudoRing(p)

    # Make the parse-bridges function get re-called on SIGHUP.
    def reload():
        """Re-read the config, reload bridges and proxies, and dump the
        current pool assignments to disk.
        """
        logging.info("Caught SIGHUP")

        # Re-open the config file.
        options, arguments = Opt.parseOpts()
        configuration = {}
        # NOTE(review): ``cfg`` is assigned below, making it local to this
        # closure; if ``options.configfile`` is ever unset, the later
        # ``load(cfg, ...)`` raises UnboundLocalError — confirm a configfile
        # is always provided on the command line.
        if options.configfile:
            execfile(options.configfile, configuration)
            cfg = Conf(**configuration)
            # Update loglevel on (re)load.
            level = getattr(cfg, "LOGLEVEL", "WARNING")
            level = getattr(logging, level)
            logging.getLogger().setLevel(level)

        load(cfg, splitter, clear=True)
        proxyList.replaceProxyList(loadProxyList(cfg))
        logging.info("%d bridges loaded", len(splitter))
        if emailDistributor:
            emailDistributor.prepopulateRings()  # create default rings
            logging.info("%d for email", len(emailDistributor.splitter))
        if ipDistributor:
            ipDistributor.prepopulateRings()  # create default rings
            logging.info("%d for web:", len(ipDistributor.splitter))
            for (n, (f, r)) in ipDistributor.splitter.filterRings.items():
                logging.info(" by filter set %s, %d" % (n, len(r)))

        # Dump bridge pool assignments to disk.  The with-block closes the
        # file even if dumpAssignments() raises.
        try:
            logging.debug("Dumping pool assignments file")
            with open(cfg.ASSIGNMENTS_FILE, "a") as f:
                f.write("bridge-pool-assignment %s\n" %
                        time.strftime("%Y-%m-%d %H:%M:%S"))
                splitter.dumpAssignments(f)
        except IOError:
            logging.info("I/O error while writing assignments")

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)

    # And actually load it to start.
    reload()

    # Configure HTTP and/or HTTPS servers.
    if cfg.HTTPS_DIST and cfg.HTTPS_SHARE:
        HTTPServer.addWebServer(cfg, ipDistributor, webSchedule)

    # Configure Email servers.
    if cfg.EMAIL_DIST and cfg.EMAIL_SHARE:
        EmailServer.addSMTPServer(cfg, emailDistributor, emailSchedule)

    # Actually run the servers; always close the DB and remove the pidfile
    # on the way out.
    try:
        logging.info("Starting reactors.")
        reactor.run()
    finally:
        db.close()
        if cfg.PIDFILE:
            os.unlink(cfg.PIDFILE)
def startup(options):
    """Parse bridges,

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    # Subcommands (e.g. dumping bridges) run in this process and exit when
    # finished, so handle them before doing any server setup:
    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    # Imported here, after logging is configured (see the note above):
    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb.email.server import addServer as addSMTPServer
    from bridgedb import HTTPServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)

    # Get a proxy list.
    proxyList = ProxyCategory()
    proxyList.replaceProxyList(loadProxyList(config))

    emailDistributor = ipDistributor = None

    # Save our state
    state.proxyList = proxyList
    state.key = key
    state.save()

    def reload(inThread=True):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``splitter``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to a
        statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
        :ivar splitter: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`ProxyCategory`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
        :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        # The ordering here is deliberate: state is persisted BEFORE the
        # (potentially slow/failing) descriptor reparse, and reloaded again
        # after load() so this closure sees the post-load state.
        logging.debug("Saving state again before reparsing descriptors...")
        state.save()
        logging.info("Reparsing bridge descriptors...")

        # Fresh hashrings/distributors are built each reload; the live ones
        # are only swapped in below, via reactor.callFromThread().
        (splitter,
         emailDistributorTmp,
         ipDistributorTmp) = createBridgeRings(cfg, proxyList, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        db = bridgedb.Storage.openOrConvertDatabase(cfg.DB_FILE + ".sqlite",
                                                    cfg.DB_FILE)
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, splitter, clear=False)
        state = persistent.load()
        logging.info("Bridges loaded: %d" % len(splitter))

        logging.debug("Replacing the list of open proxies...")
        state.proxyList.replaceProxyList(loadProxyList(cfg))

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (emailDistributorTmp.name,
                            len(emailDistributorTmp.splitter)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (ipDistributorTmp.name,
                            len(ipDistributorTmp.splitter)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributorTmp.splitter.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributorTmp.splitter.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d"
                         % (ipDistributorTmp.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'"
                          % state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n"
                     % time.strftime("%Y-%m-%d %H:%M:%S"))
            splitter.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'"
                         % state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    # And actually load it to start parsing. Get back our distributors.
    emailDistributor, ipDistributor = reload(False)

    # Configure all servers:
    if config.HTTPS_DIST and config.HTTPS_SHARE:
        #webSchedule = schedule.ScheduledInterval("day", 2)
        webSchedule = schedule.Unscheduled()
        HTTPServer.addWebServer(config, ipDistributor, webSchedule)
    if config.EMAIL_DIST and config.EMAIL_SHARE:
        #emailSchedule = schedule.ScheduledInterval("day", 1)
        emailSchedule = schedule.Unscheduled()
        addSMTPServer(config, emailDistributor, emailSchedule)

    # Actually run the servers.
    try:
        logging.info("Starting reactors.")
        reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
def startup(cfg):
    """Parse bridges and start the distributor servers.

    Expands paths in the configuration, writes the pidfile, configures
    logging, builds the splitter and distributors, installs the SIGHUP
    reload handler, and runs the reactor until shutdown.

    :type cfg: :class:`Conf`
    :param cfg: The loaded configuration; path values are expanded in place.
    """
    # Expand any ~ characters in paths in the configuration.
    cfg.BRIDGE_FILES = [os.path.expanduser(fn) for fn in cfg.BRIDGE_FILES]
    for key in ("RUN_IN_DIR", "DB_FILE", "DB_LOG_FILE", "MASTER_KEY_FILE",
                "ASSIGNMENTS_FILE", "HTTPS_CERT_FILE", "HTTPS_KEY_FILE",
                "PIDFILE", "LOGFILE", "STATUS_FILE"):
        v = getattr(cfg, key, None)
        if v:
            setattr(cfg, key, os.path.expanduser(v))
    if hasattr(cfg, "PROXY_LIST_FILES"):
        cfg.PROXY_LIST_FILES = [
            os.path.expanduser(v) for v in cfg.PROXY_LIST_FILES
        ]
    else:
        cfg.PROXY_LIST_FILES = []

    # Write the pidfile.  A context manager closes the handle even if the
    # write fails.
    if cfg.PIDFILE:
        with open(cfg.PIDFILE, 'w') as f:
            f.write("%s\n" % os.getpid())

    # Set up logging.
    configureLogging(cfg)

    #XXX import Servers after logging is set up
    # Otherwise, python will create a default handler that logs to
    # the console and ignore further basicConfig calls
    from bridgedb import EmailServer
    from bridgedb import HTTPServer

    # Load the master key, or create a new one.
    key = getKey(cfg.MASTER_KEY_FILE)

    # Initialize our DB file.
    db = bridgedb.Storage.Database(cfg.DB_FILE + ".sqlite", cfg.DB_FILE)
    bridgedb.Storage.setGlobalDB(db)

    # Get a proxy list.
    proxyList = ProxyCategory()
    proxyList.replaceProxyList(loadProxyList(cfg))

    # Create a BridgeSplitter to assign the bridges to the different
    # distributors.
    splitter = Bridges.BridgeSplitter(Bridges.get_hmac(key, "Splitter-Key"))

    # Create ring parameters.  getattr() defaults keep a config missing
    # these keys from raising AttributeError.
    forcePorts = getattr(cfg, "FORCE_PORTS", None) or []
    forceFlags = getattr(cfg, "FORCE_FLAGS", None) or []
    ringParams = Bridges.BridgeRingParameters(needPorts=forcePorts,
                                              needFlags=forceFlags)

    emailDistributor = ipDistributor = None

    # As appropriate, create an IP-based distributor.
    if cfg.HTTPS_DIST and cfg.HTTPS_SHARE:
        categories = []
        if proxyList.ipset:
            categories.append(proxyList)
        ipDistributor = Dist.IPBasedDistributor(
            Dist.uniformMap,
            cfg.N_IP_CLUSTERS,
            Bridges.get_hmac(key, "HTTPS-IP-Dist-Key"),
            categories,
            answerParameters=ringParams)
        splitter.addRing(ipDistributor, "https", cfg.HTTPS_SHARE)
        #webSchedule = Time.IntervalSchedule("day", 2)
        webSchedule = Time.NoSchedule()

    # As appropriate, create an email-based distributor.
    if cfg.EMAIL_DIST and cfg.EMAIL_SHARE:
        for d in cfg.EMAIL_DOMAINS:
            cfg.EMAIL_DOMAIN_MAP[d] = d
        emailDistributor = Dist.EmailBasedDistributor(
            Bridges.get_hmac(key, "Email-Dist-Key"),
            cfg.EMAIL_DOMAIN_MAP.copy(),
            cfg.EMAIL_DOMAIN_RULES.copy(),
            answerParameters=ringParams)
        splitter.addRing(emailDistributor, "email", cfg.EMAIL_SHARE)
        #emailSchedule = Time.IntervalSchedule("day", 1)
        emailSchedule = Time.NoSchedule()

    # As appropriate, tell the splitter to leave some bridges unallocated.
    if cfg.RESERVED_SHARE:
        splitter.addRing(Bridges.UnallocatedHolder(),
                         "unallocated",
                         cfg.RESERVED_SHARE)

    # Add pseudo distributors to splitter
    for p in cfg.FILE_BUCKETS.keys():
        splitter.addPseudoRing(p)

    # Make the parse-bridges function get re-called on SIGHUP.
    def reload():
        """Re-read the config, reload bridges and proxies, and dump the
        current pool assignments to disk.
        """
        logging.info("Caught SIGHUP")

        # re open config file
        options, arguments = Opt.parseOpts()
        configuration = {}
        # NOTE(review): assigning ``cfg`` below makes it local to this
        # closure, so if ``options.configfile`` is ever unset the later
        # ``load(cfg, ...)`` raises UnboundLocalError — confirm a configfile
        # is always supplied.
        if options.configfile:
            execfile(options.configfile, configuration)
            cfg = Conf(**configuration)
            # update loglevel on (re)load
            level = getattr(cfg, 'LOGLEVEL', 'WARNING')
            level = getattr(logging, level)
            logging.getLogger().setLevel(level)

        load(cfg, splitter, clear=True)
        proxyList.replaceProxyList(loadProxyList(cfg))
        logging.info("%d bridges loaded", len(splitter))
        if emailDistributor:
            emailDistributor.prepopulateRings()  # create default rings
            logging.info("%d for email", len(emailDistributor.splitter))
        if ipDistributor:
            ipDistributor.prepopulateRings()  # create default rings
            logging.info("%d for web:", len(ipDistributor.splitter))
            for (n, (f, r)) in ipDistributor.splitter.filterRings.items():
                logging.info(" by filter set %s, %d" % (n, len(r)))

        # Dump bridge pool assignments to disk; the with-block closes the
        # file even if dumpAssignments() raises.
        try:
            logging.debug("Dumping pool assignments file")
            with open(cfg.ASSIGNMENTS_FILE, 'a') as f:
                f.write("bridge-pool-assignment %s\n" %
                        time.strftime("%Y-%m-%d %H:%M:%S"))
                splitter.dumpAssignments(f)
        except IOError:
            logging.info("I/O error while writing assignments")

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)

    # And actually load it to start.
    reload()

    # Configure HTTP and/or HTTPS servers.
    if cfg.HTTPS_DIST and cfg.HTTPS_SHARE:
        HTTPServer.addWebServer(cfg, ipDistributor, webSchedule)

    # Configure Email servers.
    if cfg.EMAIL_DIST and cfg.EMAIL_SHARE:
        EmailServer.addSMTPServer(cfg, emailDistributor, emailSchedule)

    # Actually run the servers; always close the DB and remove the pidfile
    # on the way out.
    try:
        logging.info("Starting reactors.")
        reactor.run()
    finally:
        db.close()
        if cfg.PIDFILE:
            os.unlink(cfg.PIDFILE)
def startup(options):
    """Parse bridges,

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`configureLogging` is called. Otherwise a
    # default handler that logs to the console will be created by the imported
    # module, and all further calls to :func:`logging.basicConfig` will be
    # ignored.
    configureLogging(config)

    # Subcommands run in this process and exit when finished:
    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    # Imported here, after logging is configured (see the note above):
    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb import EmailServer
    from bridgedb import HTTPServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)

    # Initialize our DB file.
    db = bridgedb.Storage.Database(config.DB_FILE + ".sqlite",
                                   config.DB_FILE)
    # TODO: move setGlobalDB to bridgedb.persistent.State class
    bridgedb.Storage.setGlobalDB(db)

    # Get a proxy list.
    proxyList = ProxyCategory()
    proxyList.replaceProxyList(loadProxyList(config))

    # Create a BridgeSplitter to assign the bridges to the different
    # distributors.
    splitter = Bridges.BridgeSplitter(crypto.getHMAC(key, "Splitter-Key"))
    logging.debug("Created splitter: %r" % splitter)

    # Create ring parameters.
    ringParams = Bridges.BridgeRingParameters(needPorts=config.FORCE_PORTS,
                                              needFlags=config.FORCE_FLAGS)

    emailDistributor = ipDistributor = None

    # As appropriate, create an IP-based distributor.
    if config.HTTPS_DIST and config.HTTPS_SHARE:
        logging.debug("Setting up HTTPS Distributor...")
        categories = []
        if proxyList.ipset:
            logging.debug("Adding proxyList to HTTPS Distributor categories.")
            categories.append(proxyList)
        logging.debug("HTTPS Distributor categories: '%s'" % categories)

        ipDistributor = Dist.IPBasedDistributor(
            Dist.uniformMap,
            config.N_IP_CLUSTERS,
            crypto.getHMAC(key, "HTTPS-IP-Dist-Key"),
            categories,
            answerParameters=ringParams)
        splitter.addRing(ipDistributor, "https", config.HTTPS_SHARE)
        #webSchedule = Time.IntervalSchedule("day", 2)
        webSchedule = Time.NoSchedule()

    # As appropriate, create an email-based distributor.
    if config.EMAIL_DIST and config.EMAIL_SHARE:
        logging.debug("Setting up Email Distributor...")
        emailDistributor = Dist.EmailBasedDistributor(
            crypto.getHMAC(key, "Email-Dist-Key"),
            config.EMAIL_DOMAIN_MAP.copy(),
            config.EMAIL_DOMAIN_RULES.copy(),
            answerParameters=ringParams)
        splitter.addRing(emailDistributor, "email", config.EMAIL_SHARE)
        #emailSchedule = Time.IntervalSchedule("day", 1)
        emailSchedule = Time.NoSchedule()

    # As appropriate, tell the splitter to leave some bridges unallocated.
    if config.RESERVED_SHARE:
        splitter.addRing(Bridges.UnallocatedHolder(),
                         "unallocated",
                         config.RESERVED_SHARE)

    # Add pseudo distributors to splitter
    for pseudoRing in config.FILE_BUCKETS.keys():
        splitter.addPseudoRing(pseudoRing)

    # Save our state
    state.proxyList = proxyList
    state.key = key
    state.save()

    def reload(*args):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``splitter``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to a
        statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
        :ivar splitter: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`ProxyCategory`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
        :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        # The ordering is deliberate: persist state BEFORE the (potentially
        # slow/failing) reparse, and reload it after load() so this closure
        # sees the post-load state.
        logging.debug("Saving state again before reparsing descriptors...")
        state.save()
        logging.info("Reparsing bridge descriptors...")

        load(state, splitter, clear=False)
        state = persistent.load()
        logging.info("Bridges loaded: %d" % len(splitter))

        logging.debug("Replacing the list of open proxies...")
        state.proxyList.replaceProxyList(loadProxyList(cfg))

        if emailDistributor is not None:
            emailDistributor.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (emailDistributor.name,
                            len(emailDistributor.splitter)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributor is not None:
            ipDistributor.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (ipDistributor.name,
                            len(ipDistributor.splitter)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributor.splitter.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributor.splitter.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d"
                         % (ipDistributor.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'"
                          % state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n"
                     % time.strftime("%Y-%m-%d %H:%M:%S"))
            splitter.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'"
                         % state.ASSIGNMENTS_FILE)
        state.save()

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    # And actually load it to start parsing.
    reload()

    # Configure all servers:
    # NOTE(review): ``webSchedule``/``emailSchedule`` are only bound inside
    # the matching ``if`` blocks above — the same conditions guard their use
    # here, so the pairing must stay in sync.
    if config.HTTPS_DIST and config.HTTPS_SHARE:
        HTTPServer.addWebServer(config, ipDistributor, webSchedule)
    if config.EMAIL_DIST and config.EMAIL_SHARE:
        EmailServer.addSMTPServer(config, emailDistributor, emailSchedule)

    # Actually run the servers.
    try:
        logging.info("Starting reactors.")
        reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        logging.info("Closing databases...")
        db.close()
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for
        normal application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    # Subcommands (e.g. dumping bridge assignments) exit when finished, so
    # none of the server-startup code below runs in that case:
    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    # These modules are imported here (after logging is configured) rather
    # than at the top of the file — presumably because importing them earlier
    # would trigger their own logging setup; see the note above
    # util.configureLogging(). TODO(review): confirm.
    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb.email.server import addServer as addSMTPServer
    from bridgedb import HTTPServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)

    # Get a proxy list.
    proxyList = proxy.ProxySet()
    for proxyfile in config.PROXY_LIST_FILES:
        logging.info("Loading proxies from: %s" % proxyfile)
        proxy.loadProxiesFromFile(proxyfile, proxyList)

    # Both distributors are created inside reload() below; the names are
    # bound here so the reload() closure can reference them.
    emailDistributor = ipDistributor = None

    # Save our state
    state.proxyList = proxyList
    state.key = key
    state.save()

    def reload(inThread=True):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``splitter``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved
        to a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
        :ivar splitter: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
        :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        # Re-apply the log level from the (possibly changed) state:
        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.debug("Saving state again before reparsing descriptors...")
        state.save()
        logging.info("Reparsing bridge descriptors...")

        # Create new hashrings/distributors; they are swapped in for the old
        # ones (via replaceBridgeRings) at the end of this function:
        (splitter,
         emailDistributorTmp,
         ipDistributorTmp) = createBridgeRings(cfg, proxyList, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        db = bridgedb.Storage.openDatabase(cfg.DB_FILE + ".sqlite")
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, splitter, clear=False)

        # Reload state, since load() above may have changed it on disk:
        state = persistent.load()
        logging.info("Bridges loaded: %d" % len(splitter))

        logging.debug("Replacing the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            proxy.loadProxiesFromFile(proxyfile, state.proxyList,
                                      removeStale=True)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (emailDistributorTmp.name,
                            len(emailDistributorTmp.splitter)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (ipDistributorTmp.name,
                            len(ipDistributorTmp.splitter)))
            # Log a per-subring breakdown of the HTTPS distributor's bridges:
            logging.info("\tNum bridges:\tFilter set:")
            nSubrings = 0
            ipSubrings = ipDistributorTmp.splitter.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributorTmp.splitter.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))
            logging.info("Total subrings for %s: %d"
                         % (ipDistributorTmp.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'"
                          % state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n"
                     % time.strftime("%Y-%m-%d %H:%M:%S"))
            splitter.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'"
                         % state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # When called from the SIGHUP handler's worker thread, the swap
            # of the new rings into the live distributors must happen on the
            # reactor thread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp

    # Expose reload() via the module-level _reloadFn so the module's signal
    # handlers can invoke it:
    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    if reactor:
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor = reload(False)

        # Configure all servers:
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            HTTPServer.addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits,
                proxyList,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        # Schedule all configured repeating tasks: entries in config.TASKS
        # with a truthy interval but no LoopingCall built above hit the
        # KeyError branch and are reported as disabled.
        for name, seconds in config.TASKS.items():
            if seconds:
                try:
                    tasks[name].start(abs(seconds))
                except KeyError:
                    logging.info("Task %s is disabled and will not run." % name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds."
                                 % (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        # Clean up the pidfile on the way out, whether we exited normally or
        # via an interrupt:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()