def test_proxy_loadProxiesFromFile_duplicates(self):
    """Loading proxies from the same file twice shouldn't store duplicates."""
    proxyList = proxy.ProxySet(['1.1.1.1'])

    proxy.loadProxiesFromFile(self.fn1, proxySet=proxyList)
    self.assertEqual(len(proxyList), 6)

    proxy.loadProxiesFromFile(self.fn1, proxySet=proxyList)
    self.assertEqual(len(proxyList), 6)
def test_ProxySet_difference(self):
    """ProxySet.difference() should list the items in ProxySetA which
    aren't in ProxySetB.
    """
    proxySetA = self.proxyList
    proxySetB = proxy.ProxySet(self.moarProxies)
    self.assertItemsEqual(proxySetA.difference(proxySetB),
                          set(self.proxies))
    self.assertItemsEqual(proxySetB.difference(proxySetA),
                          set(self.moarProxies))
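# A minimal sketch of the set-like behaviour the test above exercises; the
# addresses are made up for illustration, and difference() is assumed to
# yield the proxy addresses unique to the left-hand ProxySet:
from bridgedb import proxy

a = proxy.ProxySet(['1.1.1.1', '2.2.2.2'])
b = proxy.ProxySet(['2.2.2.2', '3.3.3.3'])
print(sorted(a.difference(b)))   # expected: ['1.1.1.1']
print(sorted(b.difference(a)))   # expected: ['3.3.3.3']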
def __init__(self, totalSubrings, key, proxies=None, answerParameters=None):
    """Create a Distributor that decides which bridges to distribute based
    upon the client's IP address and the current time.

    :param int totalSubrings: The number of subhashrings to group clients
        into. Note that if ``PROXY_LIST_FILES`` is set in bridgedb.conf,
        then the actual number of clusters is one higher than
        ``totalSubrings``, because the set of all known open proxies is
        given its own subhashring.
    :param bytes key: The master HMAC key for this distributor. All added
        bridges are HMACed with this key in order to place them into the
        hashrings.
    :type proxies: :class:`~bridgedb.proxy.ProxySet`
    :param proxies: A :class:`bridgedb.proxy.ProxySet` containing known
        Tor Exit relays and other known proxies. These will constitute the
        extra cluster, and any client requesting bridges from one of these
        **proxies** will be distributed bridges from a separate subhashring
        that is specific to Tor/proxy users.
    :type answerParameters:
        :class:`bridgedb.bridgerings.BridgeRingParameters`
    :param answerParameters: A mechanism for ensuring that the set of
        bridges that this distributor answers a client with fit certain
        parameters, i.e. that an answer has "at least two obfsproxy
        bridges" or "at least one bridge on port 443", etc.
    """
    super(HTTPSDistributor, self).__init__(key)
    self.totalSubrings = totalSubrings
    self.answerParameters = answerParameters

    if proxies:
        logging.info("Added known proxies to HTTPS distributor...")
        self.proxies = proxies
        self.totalSubrings += 1
        self.proxySubring = self.totalSubrings
    else:
        logging.warn("No known proxies were added to HTTPS distributor!")
        self.proxies = proxy.ProxySet()
        self.proxySubring = 0

    self.ringCacheSize = self.totalSubrings * 3

    key2 = getHMAC(key, "Assign-Bridges-To-Rings")
    key3 = getHMAC(key, "Order-Areas-In-Rings")
    key4 = getHMAC(key, "Assign-Areas-To-Rings")
    self._clientToPositionHMAC = getHMACFunc(key3, hex=False)
    self._subnetToSubringHMAC = getHMACFunc(key4, hex=True)
    self.hashring = FilteredBridgeSplitter(key2, self.ringCacheSize)
    self.name = 'HTTPS'
    logging.debug("Added %s to %s distributor." %
                  (self.hashring.__class__.__name__, self.name))
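# A hedged construction sketch for the class above. The import path, the
# HMAC subkey label "HTTPS-Distributor-Key", and the key file name are
# assumptions made for illustration; in BridgeDB proper this wiring happens
# inside createBridgeRings() using values from bridgedb.conf:
from bridgedb import crypto, proxy
from bridgedb.crypto import getHMAC
from bridgedb.distributors.https.distributor import HTTPSDistributor

masterKey = crypto.getKey('./master-key')        # master HMAC key file
knownProxies = proxy.ProxySet(['1.1.1.1'])       # known open proxies
dist = HTTPSDistributor(4, getHMAC(masterKey, "HTTPS-Distributor-Key"),
                        proxies=knownProxies)
# Because proxies were given, totalSubrings becomes 5: four client clusters
# plus one extra subhashring reserved for Tor exits and other known proxies.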
def test_proxy_loadProxiesFromFile_removeStale(self):
    """Test loading proxies from two files and removing the stale ones."""
    proxyList = proxy.ProxySet(['1.1.1.1'])
    self.assertEqual(len(proxyList), 1)

    proxies = proxy.loadProxiesFromFile(self.fn0, proxySet=proxyList)
    self.assertEqual(len(proxies), 3)
    self.assertEqual(len(proxyList), 4)

    proxies = proxy.loadProxiesFromFile(self.fn1, proxySet=proxyList)
    self.assertEqual(len(proxies), 5)
    self.assertEqual(len(proxyList), 9)

    self.emptyFile(self.fn0)
    proxies = proxy.loadProxiesFromFile(self.fn0, proxySet=proxyList,
                                        removeStale=True)
    self.assertEqual(len(proxies), 0)
    self.assertEqual(len(proxyList), 6)
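# The fixtures these proxy tests assume are plain-text files with one proxy
# address per line: self.fn0 holds three addresses and self.fn1 holds five,
# all distinct from the seed '1.1.1.1' already in the ProxySet. The addresses
# below are made up; only the counts follow from the assertions above:
#
#     fn0             fn1
#     ---             ---
#     11.11.11.11     21.21.21.21
#     12.12.12.12     22.22.22.22
#     13.13.13.13     23.23.23.23
#                     24.24.24.24
#                     25.25.25.25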
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options.subCommand is not None:
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    # Let our pluggable transport class know what transports are resistant to
    # active probing. We need to know because we shouldn't hand out a
    # probing-vulnerable transport on a bridge that supports a
    # probing-resistant transport. See
    # <https://bugs.torproject.org/28655> for details.
    from bridgedb.bridges import PluggableTransport
    PluggableTransport.probing_resistant_transports = \
        config.PROBING_RESISTANT_TRANSPORTS

    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb.distributors.email.server import addServer as addSMTPServer
    from bridgedb.distributors.https.server import addWebServer
    from bridgedb.distributors.moat.server import addMoatServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None
    moatDistributor = None

    # Save our state
    state.key = key
    state.save()

    def reload(inThread=True):  # pragma: no cover
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables ``cfg`` and ``hashring`` are taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved
        to a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, proxies, removeStale=True)
        metrics.setProxies(proxies)

        state.BLACKLISTED_TOR_VERSIONS = parseVersionsList(
            state.BLACKLISTED_TOR_VERSIONS)

        logging.info("Reloading blacklisted request headers...")
        antibot.loadBlacklistedRequestHeaders(
            config.BLACKLISTED_REQUEST_HEADERS_FILE)
        logging.info("Reloading decoy bridges...")
        antibot.loadDecoyBridges(config.DECOY_BRIDGES_FILE)

        (hashring, emailDistributorTmp, ipDistributorTmp,
         moatDistributorTmp) = createBridgeRings(cfg, proxies, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        logging.info("Reparsing bridge descriptors...")
        load(state, hashring, clear=False)
        logging.info("Bridges loaded: %d" % len(hashring))
        loadBlockedBridges(hashring)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No HTTP(S) distributor created!")

        if moatDistributorTmp is not None:
            moatDistributorTmp.prepopulateRings()
        else:
            logging.warn("No Moat distributor created!")

        metrix = metrics.InternalMetrics()
        logging.info("Logging bridge ring metrics for %d rings." %
                     len(hashring.ringsByName))
        for ringName, ring in hashring.ringsByName.items():
            # Ring is of type FilteredBridgeSplitter or UnallocatedHolder.
            # FilteredBridgeSplitter splits bridges into subhashrings based on
            # filters.
            if hasattr(ring, "filterRings"):
                for (ringname, (filterFn, subring)) in ring.filterRings.items():
                    subRingName = "-".join(ring.extractFilterNames(ringname))
                    metrix.recordBridgesInHashring(ringName, subRingName,
                                                   len(subring))
            elif hasattr(ring, "fingerprints"):
                metrix.recordBridgesInHashring(ringName, "unallocated",
                                               len(ring.fingerprints))

        # Dump bridge pool assignments to disk.
        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if moatDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       moatDistributor, moatDistributorTmp)
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)

    if reactor:  # pragma: no cover
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor, moatDistributor = reload(False)

        # Configure all servers:
        if config.MOAT_DIST and config.MOAT_SHARE:
            addMoatServer(config, moatDistributor)
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        metrics.setSupportedTransports(config.SUPPORTED_TRANSPORTS)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits,
                proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        measurementInterval, _ = config.TASKS['EXPORT_METRICS']
        tasks['EXPORT_METRICS'] = task.LoopingCall(
            writeMetrics, state.METRICS_FILE, measurementInterval)

        # Schedule all configured repeating tasks:
        for name, value in config.TASKS.items():
            seconds, startNow = value
            if seconds:
                try:
                    # Set now to False to get the servers up and running when
                    # first started, rather than spend a bunch of time in
                    # scheduled tasks.
                    tasks[name].start(abs(seconds), now=startNow)
                except KeyError:
                    logging.info("Task %s is disabled and will not run." % name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds."
                                 % (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:  # pragma: no cover
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
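# Hedged sketch of the TASKS mapping this version of run() expects: each value
# is a (seconds, startNow) pair, and a falsy interval disables the task. The
# intervals below are illustrative only, not copied from bridgedb.conf:
TASKS = {
    'GET_TOR_EXIT_LIST': (3 * 60 * 60, True),              # refresh Tor exit list
    'DELETE_UNPARSEABLE_DESCRIPTORS': (24 * 60 * 60, False),
    'EXPORT_METRICS': (24 * 60 * 60, False),
}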
def setUp(self):
    # We have to put something in it, otherwise self.ps.should.be.ok won't
    # think it's truthy:
    self.ps = proxy.ProxySet(['1.1.1.1'])
def setUp(self):
    self.protocol = MockExitListProtocol
    self.proxyList = proxy.ProxySet()
def test_proxy_loadProxiesFromFile_1_file_and_proxyset(self):
    """Test loading proxies from one file."""
    proxyList = proxy.ProxySet(['1.1.1.1'])
    proxies = proxy.loadProxiesFromFile(self.fn0, proxySet=proxyList)
    self.assertEqual(len(proxies), 3)
    self.assertEqual(len(proxyList), 4)
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb.email.server import addServer as addSMTPServer
    from bridgedb.https.server import addWebServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None

    # Save our state
    state.proxies = proxies
    state.key = key
    state.save()

    def reload(inThread=True):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``hashring``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved
        to a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.Bridges.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A
            :class:`~bridgedb.https.distributor.HTTPSDistributor`.
        :ivar emailDistributor: A
            :class:`~bridgedb.email.distributor.EmailDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, state.proxies,
                                      removeStale=True)

        logging.info("Reparsing bridge descriptors...")
        (hashring,
         emailDistributorTmp,
         ipDistributorTmp) = createBridgeRings(cfg, state.proxies, key)
        logging.info("Bridges loaded: %d" % len(hashring))

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, hashring, clear=False)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
            logging.info("Bridges allotted for %s distribution: %d"
                         % (emailDistributorTmp.name,
                            len(emailDistributorTmp.hashring)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings

            logging.info("Bridges allotted for %s distribution: %d"
                         % (ipDistributorTmp.name,
                            len(ipDistributorTmp.hashring)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributorTmp.hashring.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributorTmp.hashring.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d"
                         % (ipDistributorTmp.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'"
                          % state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n" %
                     time.strftime("%Y-%m-%d %H:%M:%S"))
            hashring.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'"
                         % state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer-namespace
            return emailDistributorTmp, ipDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    if reactor:
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor = reload(False)

        # Configure all servers:
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits,
                state.proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        # Schedule all configured repeating tasks:
        for name, seconds in config.TASKS.items():
            if seconds:
                try:
                    tasks[name].start(abs(seconds))
                except KeyError:
                    logging.info("Task %s is disabled and will not run." % name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds."
                                 % (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
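# For contrast with the other run() variant, this one iterates
# ``for name, seconds in config.TASKS.items()``, so each value here is a bare
# interval in seconds rather than a (seconds, startNow) pair. A hedged sketch
# with illustrative values only:
TASKS = {
    'GET_TOR_EXIT_LIST': 3 * 60 * 60,
    'DELETE_UNPARSEABLE_DESCRIPTORS': 24 * 60 * 60,
}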