def _handleSIGUSR1(*args):
    """Handler for SIGUSR1. Calls :func:`~bridgedb.runner.doDumpBridges`."""
    logging.debug("Caught SIGUSR1 signal")

    from bridgedb import runner

    logging.info("Loading saved state...")
    state = persistent.load()
    cfg = loadConfig(state.CONFIG_FILE, state.config)

    logging.info("Dumping bridge assignments to files...")
    reactor.callInThread(runner.doDumpBridges, cfg)
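# Illustrative sketch (not part of the snippet above): the handler only takes
# effect once it is registered for SIGUSR1. The exact registration point inside
# BridgeDB's startup code is assumed here; the stdlib wiring itself is just:
import signal

signal.signal(signal.SIGUSR1, _handleSIGUSR1)  # deliver SIGUSR1 to the handler above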
def reload(inThread=True): # pragma: no cover
    """Reload settings, proxy lists, and bridges.

    State should be saved before calling this method, and will be saved again
    at the end of it.

    The internal variables ``cfg`` and ``hashring`` are taken from a
    :class:`~bridgedb.persistent.State` instance, which has been saved to a
    statefile with :meth:`bridgedb.persistent.State.save`.

    :type cfg: :class:`Conf`
    :ivar cfg: The current configuration, including any in-memory settings
        (i.e. settings whose values were not obtained from the config file,
        but were set via a function somewhere)
    :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
    :ivar hashring: A class which takes an HMAC key and splits bridges into
        their hashring assignments.
    """
    logging.debug("Caught SIGHUP")
    logging.info("Reloading...")

    logging.info("Loading saved state...")
    state = persistent.load()
    cfg = loadConfig(state.CONFIG_FILE, state.config)
    logging.info("Updating any changed settings...")
    state.useChangedSettings(cfg)

    level = getattr(state, 'LOGLEVEL', 'WARNING')
    logging.info("Updating log level to: '%s'" % level)
    level = getattr(logging, level)
    logging.getLogger().setLevel(level)

    logging.info("Reloading the list of open proxies...")
    for proxyfile in cfg.PROXY_LIST_FILES:
        logging.info("Loading proxies from: %s" % proxyfile)
        proxy.loadProxiesFromFile(proxyfile, proxies, removeStale=True)
    metrics.setProxies(proxies)

    state.BLACKLISTED_TOR_VERSIONS = parseVersionsList(
        state.BLACKLISTED_TOR_VERSIONS)

    logging.info("Reloading blacklisted request headers...")
    antibot.loadBlacklistedRequestHeaders(
        config.BLACKLISTED_REQUEST_HEADERS_FILE)
    logging.info("Reloading decoy bridges...")
    antibot.loadDecoyBridges(config.DECOY_BRIDGES_FILE)

    (hashring,
     emailDistributorTmp,
     ipDistributorTmp,
     moatDistributorTmp) = createBridgeRings(cfg, proxies, key)

    # Initialize our DB.
    bridgedb.Storage.initializeDBLock()
    bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
    logging.info("Reparsing bridge descriptors...")
    load(state, hashring, clear=False)
    logging.info("Bridges loaded: %d" % len(hashring))
    loadBlockedBridges(hashring)

    if emailDistributorTmp is not None:
        emailDistributorTmp.prepopulateRings() # create default rings
    else:
        logging.warn("No email distributor created!")

    if ipDistributorTmp is not None:
        ipDistributorTmp.prepopulateRings() # create default rings
    else:
        logging.warn("No HTTP(S) distributor created!")

    if moatDistributorTmp is not None:
        moatDistributorTmp.prepopulateRings()
    else:
        logging.warn("No Moat distributor created!")

    metrix = metrics.InternalMetrics()
    logging.info("Logging bridge ring metrics for %d rings." %
                 len(hashring.ringsByName))
    for ringName, ring in hashring.ringsByName.items():
        # Ring is of type FilteredBridgeSplitter or UnallocatedHolder.
        # FilteredBridgeSplitter splits bridges into subhashrings based on
        # filters.
        if hasattr(ring, "filterRings"):
            for (ringname, (filterFn, subring)) in ring.filterRings.items():
                subRingName = "-".join(ring.extractFilterNames(ringname))
                metrix.recordBridgesInHashring(ringName,
                                               subRingName,
                                               len(subring))
        elif hasattr(ring, "fingerprints"):
            metrix.recordBridgesInHashring(ringName, "unallocated",
                                           len(ring.fingerprints))

    # Dump bridge pool assignments to disk.
    writeAssignments(hashring, state.ASSIGNMENTS_FILE)
    state.save()

    if inThread:
        # XXX shutdown the distributors if they were previously running
        # and should now be disabled
        if moatDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   moatDistributor, moatDistributorTmp)
        if ipDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   ipDistributor, ipDistributorTmp)
        if emailDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   emailDistributor, emailDistributorTmp)
    else:
        # We're still starting up. Return these distributors so
        # they are configured in the outer-namespace
        return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp
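# Hypothetical sketch (the helper itself is not shown in the snippet above):
# reload() hands the freshly built distributors to replaceBridgeRings() via
# reactor.callFromThread so the already-running servers pick up the new
# hashrings. Assuming each distributor exposes its hashring as an attribute,
# a minimal version of such a swap could be:
def replaceBridgeRings(current, replacement):
    """Swap the hashring in ``current`` for the one built by ``replacement``."""
    current.hashring = replacement.hashring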
def reload(inThread=True): # pragma: no cover
    """Reload settings, proxy lists, and bridges.

    State should be saved before calling this method, and will be saved again
    at the end of it.

    The internal variables, ``cfg``, ``hashring``, ``proxyList``,
    ``ipDistributor``, and ``emailDistributor`` are all taken from a
    :class:`~bridgedb.persistent.State` instance, which has been saved to a
    statefile with :meth:`bridgedb.persistent.State.save`.

    :type cfg: :class:`Conf`
    :ivar cfg: The current configuration, including any in-memory settings
        (i.e. settings whose values were not obtained from the config file,
        but were set via a function somewhere)
    :type hashring: A :class:`~bridgedb.Bridges.BridgeSplitter`
    :ivar hashring: A class which takes an HMAC key and splits bridges into
        their hashring assignments.
    :type proxyList: :class:`~bridgedb.proxy.ProxySet`
    :ivar proxyList: The container for the IP addresses of any currently
        known open proxies.
    :ivar ipDistributor: A
        :class:`~bridgedb.distributors.https.distributor.HTTPSDistributor`.
    :ivar emailDistributor: A
        :class:`~bridgedb.distributors.email.distributor.EmailDistributor`.
    :ivar dict tasks: A dictionary of ``{name: task}``, where name is a string
        to associate with the ``task``, and ``task`` is some scheduled event,
        repetitive or otherwise, for the :class:`reactor
        <twisted.internet.epollreactor.EPollReactor>`. See the classes within
        the :api:`twisted.internet.tasks` module.
    """
    logging.debug("Caught SIGHUP")
    logging.info("Reloading...")

    logging.info("Loading saved state...")
    state = persistent.load()
    cfg = loadConfig(state.CONFIG_FILE, state.config)
    logging.info("Updating any changed settings...")
    state.useChangedSettings(cfg)

    level = getattr(state, 'LOGLEVEL', 'WARNING')
    logging.info("Updating log level to: '%s'" % level)
    level = getattr(logging, level)
    logging.getLogger().setLevel(level)

    logging.info("Reloading the list of open proxies...")
    for proxyfile in cfg.PROXY_LIST_FILES:
        logging.info("Loading proxies from: %s" % proxyfile)
        proxy.loadProxiesFromFile(proxyfile, state.proxies, removeStale=True)

    logging.info("Reparsing bridge descriptors...")
    (hashring,
     emailDistributorTmp,
     ipDistributorTmp,
     moatDistributorTmp) = createBridgeRings(cfg, state.proxies, key)
    logging.info("Bridges loaded: %d" % len(hashring))

    # Initialize our DB.
    bridgedb.Storage.initializeDBLock()
    bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
    load(state, hashring, clear=False)

    if emailDistributorTmp is not None:
        emailDistributorTmp.prepopulateRings() # create default rings
    else:
        logging.warn("No email distributor created!")

    if ipDistributorTmp is not None:
        ipDistributorTmp.prepopulateRings() # create default rings
    else:
        logging.warn("No HTTP(S) distributor created!")

    if moatDistributorTmp is not None:
        moatDistributorTmp.prepopulateRings()
    else:
        logging.warn("No Moat distributor created!")

    # Dump bridge pool assignments to disk.
    writeAssignments(hashring, state.ASSIGNMENTS_FILE)
    state.save()

    if inThread:
        # XXX shutdown the distributors if they were previously running
        # and should now be disabled
        if moatDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   moatDistributor, moatDistributorTmp)
        if ipDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   ipDistributor, ipDistributorTmp)
        if emailDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   emailDistributor, emailDistributorTmp)
    else:
        # We're still starting up. Return these distributors so
        # they are configured in the outer-namespace
        return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp
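# Illustrative sketch only: the ``tasks`` dict is described in the docstring
# above but never populated in this snippet. A repetitive reactor task of the
# kind it mentions would typically be a twisted.internet.task.LoopingCall kept
# under a descriptive key; the key name, the task body, and the interval below
# are all assumptions made for illustration:
from twisted.internet import task

def logHeartbeat():  # hypothetical task body
    logging.info("BridgeDB is alive")

tasks = {}
tasks['HEARTBEAT'] = task.LoopingCall(logHeartbeat)
tasks['HEARTBEAT'].start(3600, now=False)  # run hourly, skip the immediate call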
def test_load_with_state(self):
    loadedState = persistent.load(self.state)
    self.loadedStateAssertions(loadedState)
def test_load(self):
    self.state.save()
    loadedState = persistent.load()
    self.loadedStateAssertions(loadedState)
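# Hypothetical sketch: both tests delegate to loadedStateAssertions(), which is
# defined elsewhere in the test class. The exact checks are assumptions; a
# minimal version would simply confirm that persistent.load() handed back a
# usable State object:
def loadedStateAssertions(self, loadedState):
    self.assertIsNotNone(loadedState)
    self.assertIsInstance(loadedState, persistent.State)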
def reload(inThread=True):
    """Reload settings, proxy lists, and bridges.

    State should be saved before calling this method, and will be saved again
    at the end of it.

    The internal variables, ``cfg``, ``splitter``, ``proxyList``,
    ``ipDistributor``, and ``emailDistributor`` are all taken from a
    :class:`~bridgedb.persistent.State` instance, which has been saved to a
    statefile with :meth:`bridgedb.persistent.State.save`.

    :type cfg: :class:`Conf`
    :ivar cfg: The current configuration, including any in-memory settings
        (i.e. settings whose values were not obtained from the config file,
        but were set via a function somewhere)
    :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
    :ivar splitter: A class which takes an HMAC key and splits bridges into
        their hashring assignments.
    :type proxyList: :class:`ProxyCategory`
    :ivar proxyList: The container for the IP addresses of any currently
        known open proxies.
    :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
    :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
    :ivar dict tasks: A dictionary of ``{name: task}``, where name is a string
        to associate with the ``task``, and ``task`` is some scheduled event,
        repetitive or otherwise, for the :class:`reactor
        <twisted.internet.epollreactor.EPollReactor>`. See the classes within
        the :api:`twisted.internet.tasks` module.
    """
    logging.debug("Caught SIGHUP")
    logging.info("Reloading...")

    logging.info("Loading saved state...")
    state = persistent.load()
    cfg = loadConfig(state.CONFIG_FILE, state.config)
    logging.info("Updating any changed settings...")
    state.useChangedSettings(cfg)

    level = getattr(state, 'LOGLEVEL', 'WARNING')
    logging.info("Updating log level to: '%s'" % level)
    level = getattr(logging, level)
    logging.getLogger().setLevel(level)

    logging.debug("Saving state again before reparsing descriptors...")
    state.save()
    logging.info("Reparsing bridge descriptors...")

    (splitter,
     emailDistributorTmp,
     ipDistributorTmp) = createBridgeRings(cfg, proxyList, key)

    # Initialize our DB.
    bridgedb.Storage.initializeDBLock()
    db = bridgedb.Storage.openOrConvertDatabase(cfg.DB_FILE + ".sqlite",
                                                cfg.DB_FILE)
    bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
    load(state, splitter, clear=False)
    state = persistent.load()
    logging.info("Bridges loaded: %d" % len(splitter))

    logging.debug("Replacing the list of open proxies...")
    state.proxyList.replaceProxyList(loadProxyList(cfg))

    if emailDistributorTmp is not None:
        emailDistributorTmp.prepopulateRings() # create default rings
        logging.info("Bridges allotted for %s distribution: %d"
                     % (emailDistributorTmp.name,
                        len(emailDistributorTmp.splitter)))
    else:
        logging.warn("No email distributor created!")

    if ipDistributorTmp is not None:
        ipDistributorTmp.prepopulateRings() # create default rings
        logging.info("Bridges allotted for %s distribution: %d"
                     % (ipDistributorTmp.name,
                        len(ipDistributorTmp.splitter)))
        logging.info("\tNum bridges:\tFilter set:")

        nSubrings = 0
        ipSubrings = ipDistributorTmp.splitter.filterRings
        for (ringname, (filterFn, subring)) in ipSubrings.items():
            nSubrings += 1
            filterSet = ' '.join(
                ipDistributorTmp.splitter.extractFilterNames(ringname))
            logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

        logging.info("Total subrings for %s: %d"
                     % (ipDistributorTmp.name, nSubrings))
    else:
        logging.warn("No HTTP(S) distributor created!")

    # Dump bridge pool assignments to disk.
    try:
        logging.debug("Dumping pool assignments to file: '%s'"
                      % state.ASSIGNMENTS_FILE)
        fh = open(state.ASSIGNMENTS_FILE, 'a')
        fh.write("bridge-pool-assignment %s\n" %
                 time.strftime("%Y-%m-%d %H:%M:%S"))
        splitter.dumpAssignments(fh)
        fh.flush()
        fh.close()
    except IOError:
        logging.info("I/O error while writing assignments to: '%s'"
                     % state.ASSIGNMENTS_FILE)

    state.save()

    if inThread:
        # XXX shutdown the distributors if they were previously running
        # and should now be disabled
        if ipDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   ipDistributor, ipDistributorTmp)
        if emailDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   emailDistributor, emailDistributorTmp)
    else:
        # We're still starting up. Return these distributors so
        # they are configured in the outer-namespace
        return emailDistributorTmp, ipDistributorTmp
def reload(inThread=True):
    """Reload settings, proxy lists, and bridges.

    State should be saved before calling this method, and will be saved again
    at the end of it.

    The internal variables, ``cfg``, ``splitter``, ``proxyList``,
    ``ipDistributor``, and ``emailDistributor`` are all taken from a
    :class:`~bridgedb.persistent.State` instance, which has been saved to a
    statefile with :meth:`bridgedb.persistent.State.save`.

    :type cfg: :class:`Conf`
    :ivar cfg: The current configuration, including any in-memory settings
        (i.e. settings whose values were not obtained from the config file,
        but were set via a function somewhere)
    :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
    :ivar splitter: A class which takes an HMAC key and splits bridges into
        their hashring assignments.
    :type proxyList: :class:`~bridgedb.proxy.ProxySet`
    :ivar proxyList: The container for the IP addresses of any currently
        known open proxies.
    :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
    :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
    :ivar dict tasks: A dictionary of ``{name: task}``, where name is a string
        to associate with the ``task``, and ``task`` is some scheduled event,
        repetitive or otherwise, for the :class:`reactor
        <twisted.internet.epollreactor.EPollReactor>`. See the classes within
        the :api:`twisted.internet.tasks` module.
    """
    logging.debug("Caught SIGHUP")
    logging.info("Reloading...")

    logging.info("Loading saved state...")
    state = persistent.load()
    cfg = loadConfig(state.CONFIG_FILE, state.config)
    logging.info("Updating any changed settings...")
    state.useChangedSettings(cfg)

    level = getattr(state, 'LOGLEVEL', 'WARNING')
    logging.info("Updating log level to: '%s'" % level)
    level = getattr(logging, level)
    logging.getLogger().setLevel(level)

    logging.debug("Saving state again before reparsing descriptors...")
    state.save()
    logging.info("Reparsing bridge descriptors...")

    (splitter,
     emailDistributorTmp,
     ipDistributorTmp) = createBridgeRings(cfg, proxyList, key)

    # Initialize our DB.
    bridgedb.Storage.initializeDBLock()
    db = bridgedb.Storage.openDatabase(cfg.DB_FILE + ".sqlite")
    bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
    load(state, splitter, clear=False)
    state = persistent.load()
    logging.info("Bridges loaded: %d" % len(splitter))

    logging.debug("Replacing the list of open proxies...")
    for proxyfile in cfg.PROXY_LIST_FILES:
        proxy.loadProxiesFromFile(proxyfile, state.proxyList, removeStale=True)

    if emailDistributorTmp is not None:
        emailDistributorTmp.prepopulateRings() # create default rings
        logging.info(
            "Bridges allotted for %s distribution: %d"
            % (emailDistributorTmp.name, len(emailDistributorTmp.splitter)))
    else:
        logging.warn("No email distributor created!")

    if ipDistributorTmp is not None:
        ipDistributorTmp.prepopulateRings() # create default rings
        logging.info(
            "Bridges allotted for %s distribution: %d"
            % (ipDistributorTmp.name, len(ipDistributorTmp.splitter)))
        logging.info("\tNum bridges:\tFilter set:")

        nSubrings = 0
        ipSubrings = ipDistributorTmp.splitter.filterRings
        for (ringname, (filterFn, subring)) in ipSubrings.items():
            nSubrings += 1
            filterSet = ' '.join(
                ipDistributorTmp.splitter.extractFilterNames(ringname))
            logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

        logging.info("Total subrings for %s: %d"
                     % (ipDistributorTmp.name, nSubrings))
    else:
        logging.warn("No HTTP(S) distributor created!")

    # Dump bridge pool assignments to disk.
    try:
        logging.debug("Dumping pool assignments to file: '%s'"
                      % state.ASSIGNMENTS_FILE)
        fh = open(state.ASSIGNMENTS_FILE, 'a')
        fh.write("bridge-pool-assignment %s\n" %
                 time.strftime("%Y-%m-%d %H:%M:%S"))
        splitter.dumpAssignments(fh)
        fh.flush()
        fh.close()
    except IOError:
        logging.info("I/O error while writing assignments to: '%s'"
                     % state.ASSIGNMENTS_FILE)

    state.save()

    if inThread:
        # XXX shutdown the distributors if they were previously running
        # and should now be disabled
        if ipDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   ipDistributor, ipDistributorTmp)
        if emailDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   emailDistributor, emailDistributorTmp)
    else:
        # We're still starting up. Return these distributors so
        # they are configured in the outer-namespace
        return emailDistributorTmp, ipDistributorTmp
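# Sketch of an equivalent form of the assignments dump above (behaviour under
# the same state/splitter/time/logging names is assumed): a context manager
# closes the file handle even if dumpAssignments() raises mid-write, so the
# explicit flush()/close() pair becomes unnecessary:
try:
    with open(state.ASSIGNMENTS_FILE, 'a') as fh:
        fh.write("bridge-pool-assignment %s\n" %
                 time.strftime("%Y-%m-%d %H:%M:%S"))
        splitter.dumpAssignments(fh)
except IOError:
    logging.info("I/O error while writing assignments to: '%s'"
                 % state.ASSIGNMENTS_FILE)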