Example 1
    def test_loadConfig_with_file_and_class(self):
        """We should be able to reload and parse the ``bridgedb.conf``
        file, if we have a config class as well.
        """
        config = configure.loadConfig(self.configFilename)
        newConfig = configure.loadConfig(self.configFilename, configCls=config)
        self.assertTrue(newConfig)
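
The round-trip above depends on ``loadConfig`` accepting a filename, an existing config object, or both. A minimal sketch of that pattern follows; the internals are hypothetical and the real ``bridgedb.configure.loadConfig`` may differ:

# Sketch only: not the actual bridgedb.configure implementation.
class Conf(object):
    """A simple namespace holding uppercase configuration attributes."""
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

def loadConfig(configFile=None, configCls=None):
    """Merge settings from an old config instance and/or a config file."""
    settings = {}
    if configCls is not None:
        # Carry over the uppercase attributes of the previous instance.
        settings.update((k, v) for k, v in vars(configCls).items()
                        if k.isupper())
    if configFile is not None:
        # Execute the config file and keep only its uppercase names.
        fileScope = {}
        with open(configFile) as fh:
            exec(compile(fh.read(), configFile, 'exec'), fileScope)
        settings.update((k, v) for k, v in fileScope.items()
                        if k.isupper())
    return Conf(**settings)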
Example 2
    def test_loadConfig_set_EXTRA_INFO_FILES_when_None(self):
        """If certain options, like the ``EXTRA_INFO_FILES`` option in the
        config file, weren't set, they should be made into lists so that our
        parsers don't choke on them later.
        """
        config = configure.loadConfig(self.configFilename)
        setattr(config, "EXTRA_INFO_FILES", None)
        self.assertIsNone(config.EXTRA_INFO_FILES)
        newConfig = configure.loadConfig(configCls=config)
        self.assertIsInstance(newConfig.EXTRA_INFO_FILES, list)
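
The coercion this test expects could be implemented as a small normalisation pass after loading. A sketch, under the assumption that ``EXTRA_INFO_FILES`` is one of a known set of list-valued options; the helper and the option set are hypothetical:

LIST_OPTIONS = ("EXTRA_INFO_FILES",)  # hypothetical; the real set may differ

def normalizeListOptions(config):
    """Coerce unset list-valued options to empty lists so later parsers
    can iterate over them without special-casing None."""
    for name in LIST_OPTIONS:
        if getattr(config, name, None) is None:
            setattr(config, name, [])
    return config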
Example 3
def _handleSIGUSR1(*args):
    """Handler for SIGUSR1. Calls :func:`~bridgedb.runner.doDumpBridges`."""
    logging.debug("Caught SIGUSR1 signal")

    from bridgedb import runner

    logging.info("Loading saved state...")
    state = persistent.load()
    cfg = loadConfig(state.CONFIG_FILE, state.config)

    logging.info("Dumping bridge assignments to files...")
    reactor.callInThread(runner.doDumpBridges, cfg)
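
For this handler to fire, it has to be registered against the signal, as the startup code in Example 10 does. Once registered, the dump can be triggered from a shell by signalling the running daemon (the pidfile path below is an assumption):

import signal

# Registration during startup (see run() in Example 10):
signal.signal(signal.SIGUSR1, _handleSIGUSR1)

# From a shell, assuming the PID was written to the configured PIDFILE:
#   kill -USR1 "$(cat /path/to/bridgedb.pid)"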
Example 4
    def test_loadConfig_with_class(self):
        """We should be able to recreate a config, given its class."""
        config = configure.loadConfig(self.configFilename)
        newConfig = configure.loadConfig(configCls=config)
        self.assertTrue(newConfig)
Example 5
    def test_loadConfig_with_file(self):
        """We should be able to load and parse the standard ``bridgedb.conf``
        file from the top directory of this repository.
        """
        config = configure.loadConfig(self.configFilename)
        self.assertTrue(config)
Example 6
    def reload(inThread=True):  # pragma: no cover
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables ``cfg`` and ``hashring`` are taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to a
        statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, proxies, removeStale=True)
        metrics.setProxies(proxies)

        state.BLACKLISTED_TOR_VERSIONS = parseVersionsList(
            state.BLACKLISTED_TOR_VERSIONS)

        logging.info("Reloading blacklisted request headers...")
        antibot.loadBlacklistedRequestHeaders(
            config.BLACKLISTED_REQUEST_HEADERS_FILE)
        logging.info("Reloading decoy bridges...")
        antibot.loadDecoyBridges(config.DECOY_BRIDGES_FILE)

        (hashring, emailDistributorTmp, ipDistributorTmp,
         moatDistributorTmp) = createBridgeRings(cfg, proxies, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        logging.info("Reparsing bridge descriptors...")
        load(state, hashring, clear=False)
        logging.info("Bridges loaded: %d" % len(hashring))
        loadBlockedBridges(hashring)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No HTTP(S) distributor created!")

        if moatDistributorTmp is not None:
            moatDistributorTmp.prepopulateRings()
        else:
            logging.warn("No Moat distributor created!")

        metrix = metrics.InternalMetrics()
        logging.info("Logging bridge ring metrics for %d rings." %
                     len(hashring.ringsByName))
        for ringName, ring in hashring.ringsByName.items():
            # Ring is of type FilteredBridgeSplitter or UnallocatedHolder.
            # FilteredBridgeSplitter splits bridges into subhashrings based on
            # filters.
            if hasattr(ring, "filterRings"):
                for (ringname, (filterFn,
                                subring)) in ring.filterRings.items():
                    subRingName = "-".join(ring.extractFilterNames(ringname))
                    metrix.recordBridgesInHashring(ringName, subRingName,
                                                   len(subring))
            elif hasattr(ring, "fingerprints"):
                metrix.recordBridgesInHashring(ringName, "unallocated",
                                               len(ring.fingerprints))

        # Dump bridge pool assignments to disk.
        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if moatDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, moatDistributor,
                                       moatDistributorTmp)
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, ipDistributor,
                                       ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, emailDistributor,
                                       emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer namespace.
            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp
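
Example 7 below stores this function in the module-global ``_reloadFn`` and installs ``_handleSIGHUP`` as the SIGHUP handler. That handler itself is not shown in these examples; a plausible sketch, assuming it simply dispatches the stored callable off the main thread:

def _handleSIGHUP(*args):
    """Handler for SIGHUP. Dispatches the stored ``_reloadFn``."""
    logging.debug("Caught SIGHUP signal")
    # Run the (blocking) reload in a worker thread so the reactor
    # stays responsive while descriptors are reparsed.
    reactor.callInThread(_reloadFn)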
Example 7
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options.subCommand is not None:
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    # Let our pluggable transport class know what transports are resistant to
    # active probing.  We need to know because we shouldn't hand out a
    # probing-vulnerable transport on a bridge that supports a
    # probing-resistant transport.  See
    # <https://bugs.torproject.org/28655> for details.
    from bridgedb.bridges import PluggableTransport
    PluggableTransport.probing_resistant_transports = config.PROBING_RESISTANT_TRANSPORTS

    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb.distributors.email.server import addServer as addSMTPServer
    from bridgedb.distributors.https.server import addWebServer
    from bridgedb.distributors.moat.server import addMoatServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None
    moatDistributor = None

    # Save our state
    state.key = key
    state.save()

    def reload(inThread=True):  # pragma: no cover
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables ``cfg`` and ``hashring`` are taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to a
        statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, proxies, removeStale=True)
        metrics.setProxies(proxies)

        state.BLACKLISTED_TOR_VERSIONS = parseVersionsList(
            state.BLACKLISTED_TOR_VERSIONS)

        logging.info("Reloading blacklisted request headers...")
        antibot.loadBlacklistedRequestHeaders(
            config.BLACKLISTED_REQUEST_HEADERS_FILE)
        logging.info("Reloading decoy bridges...")
        antibot.loadDecoyBridges(config.DECOY_BRIDGES_FILE)

        (hashring, emailDistributorTmp, ipDistributorTmp,
         moatDistributorTmp) = createBridgeRings(cfg, proxies, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        logging.info("Reparsing bridge descriptors...")
        load(state, hashring, clear=False)
        logging.info("Bridges loaded: %d" % len(hashring))
        loadBlockedBridges(hashring)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings
        else:
            logging.warn("No HTTP(S) distributor created!")

        if moatDistributorTmp is not None:
            moatDistributorTmp.prepopulateRings()
        else:
            logging.warn("No Moat distributor created!")

        metrix = metrics.InternalMetrics()
        logging.info("Logging bridge ring metrics for %d rings." %
                     len(hashring.ringsByName))
        for ringName, ring in hashring.ringsByName.items():
            # Ring is of type FilteredBridgeSplitter or UnallocatedHolder.
            # FilteredBridgeSplitter splits bridges into subhashrings based on
            # filters.
            if hasattr(ring, "filterRings"):
                for (ringname, (filterFn,
                                subring)) in ring.filterRings.items():
                    subRingName = "-".join(ring.extractFilterNames(ringname))
                    metrix.recordBridgesInHashring(ringName, subRingName,
                                                   len(subring))
            elif hasattr(ring, "fingerprints"):
                metrix.recordBridgesInHashring(ringName, "unallocated",
                                               len(ring.fingerprints))

        # Dump bridge pool assignments to disk.
        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if moatDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, moatDistributor,
                                       moatDistributorTmp)
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, ipDistributor,
                                       ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, emailDistributor,
                                       emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer namespace.
            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)

    if reactor:  # pragma: no cover
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor, moatDistributor = reload(False)

        # Configure all servers:
        if config.MOAT_DIST and config.MOAT_SHARE:
            addMoatServer(config, moatDistributor)
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        metrics.setSupportedTransports(config.SUPPORTED_TRANSPORTS)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits, proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        measurementInterval, _ = config.TASKS['EXPORT_METRICS']
        tasks['EXPORT_METRICS'] = task.LoopingCall(writeMetrics,
                                                   state.METRICS_FILE,
                                                   measurementInterval)

        # Schedule all configured repeating tasks:
        for name, value in config.TASKS.items():
            seconds, startNow = value
            if seconds:
                try:
                    # Honor the configured startNow flag: a task with
                    # startNow=True also fires once immediately at startup,
                    # rather than waiting for its first interval.
                    tasks[name].start(abs(seconds), now=startNow)
                except KeyError:
                    logging.info("Task %s is disabled and will not run." %
                                 name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds." %
                                 (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:  # pragma: no cover
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
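
The scheduling above leans on Twisted's ``task.LoopingCall``. A self-contained sketch of the pattern, with a hypothetical ``heartbeat`` task and an interval chosen purely for illustration:

from twisted.internet import reactor, task

def heartbeat():
    print("tick")

# LoopingCall invokes heartbeat every 30 seconds; now=True also fires it
# immediately at start(), which is what the startNow flag controls above.
loop = task.LoopingCall(heartbeat)
loop.start(30, now=True)
reactor.run()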
Example 8
    def reload(inThread=True): # pragma: no cover
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``hashring``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.Bridges.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A
            :class:`~bridgedb.distributors.https.distributor.HTTPSDistributor`.
        :ivar emailDistributor: A
            :class:`~bridgedb.distributors.email.distributor.EmailDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, state.proxies, removeStale=True)

        logging.info("Reparsing bridge descriptors...")
        (hashring,
         emailDistributorTmp,
         ipDistributorTmp,
         moatDistributorTmp) = createBridgeRings(cfg, state.proxies, key)
        logging.info("Bridges loaded: %d" % len(hashring))

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, hashring, clear=False)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings() # create default rings
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings() # create default rings
        else:
            logging.warn("No HTTP(S) distributor created!")

        if moatDistributorTmp is not None:
            moatDistributorTmp.prepopulateRings()
        else:
            logging.warn("No Moat distributor created!")

        # Dump bridge pool assignments to disk.
        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if moatDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       moatDistributor, moatDistributorTmp)
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer namespace.
            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp
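
When reload() runs on a signal-handler thread, the freshly built distributors are handed to ``replaceBridgeRings`` via ``reactor.callFromThread``, so the swap happens on the reactor thread. The function itself is not shown in these examples; a plausible sketch, assuming it only needs to swap the hashring exposed by the long-lived distributor objects:

def replaceBridgeRings(currentDistributor, newDistributor):
    """Hypothetical sketch: hand the distributor that the servers already
    hold a reference to the newly populated hashring."""
    currentDistributor.hashring = newDistributor.hashring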
Example 9
    def test_loadConfig_returns_Conf(self):
        """After loading and parsing the ``bridgedb.conf`` file, we should have
        a :class:`bridgedb.configure.Conf`.
        """
        config = configure.loadConfig(self.configFilename)
        self.assertIsInstance(config, configure.Conf)
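
These test methods evidently live in a unittest-style TestCase with a ``configFilename`` fixture. A minimal harness sketch; the ``setUp`` value is an assumption, not the project's actual fixture:

import unittest

from bridgedb import configure  # assumes bridgedb is importable

class LoadConfigTests(unittest.TestCase):
    def setUp(self):
        # Hypothetical fixture: point at the repository's top-level config.
        self.configFilename = "bridgedb.conf"

    def test_loadConfig_returns_Conf(self):
        config = configure.loadConfig(self.configFilename)
        self.assertIsInstance(config, configure.Conf)

if __name__ == "__main__":
    unittest.main()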
Example 10
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10:  # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options['dump-bridges'] or (options.subCommand is not None):
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb.email.server import addServer as addSMTPServer
    from bridgedb.https.server import addWebServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None

    # Save our state
    state.proxies = proxies
    state.key = key
    state.save()

    def reload(inThread=True):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``hashring``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.Bridges.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A
            :class:`~bridgedb.https.distributor.HTTPSDistributor`.
        :ivar emailDistributor: A
            :class:`~bridgedb.email.distributor.EmailDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile,
                                      state.proxies,
                                      removeStale=True)

        logging.info("Reparsing bridge descriptors...")
        (hashring, emailDistributorTmp,
         ipDistributorTmp) = createBridgeRings(cfg, state.proxies, key)
        logging.info("Bridges loaded: %d" % len(hashring))

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, hashring, clear=False)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
            logging.info(
                "Bridges allotted for %s distribution: %d" %
                (emailDistributorTmp.name, len(emailDistributorTmp.hashring)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings

            logging.info(
                "Bridges allotted for %s distribution: %d" %
                (ipDistributorTmp.name, len(ipDistributorTmp.hashring)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributorTmp.hashring.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributorTmp.hashring.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d" %
                         (ipDistributorTmp.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'" %
                          state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n" %
                     time.strftime("%Y-%m-%d %H:%M:%S"))
            hashring.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'" %
                         state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, ipDistributor,
                                       ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, emailDistributor,
                                       emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer namespace.
            return emailDistributorTmp, ipDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)
    signal.signal(signal.SIGUSR1, _handleSIGUSR1)

    if reactor:
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor = reload(False)

        # Configure all servers:
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits, state.proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        # Schedule all configured repeating tasks:
        for name, seconds in config.TASKS.items():
            if seconds:
                try:
                    tasks[name].start(abs(seconds))
                except KeyError:
                    logging.info("Task %s is disabled and will not run." %
                                 name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds." %
                                 (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt:
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
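
Note the two TASKS layouts across these versions: here (and in Example 11) each value is a plain interval in seconds, while Example 7 unpacks ``(seconds, startNow)`` pairs. Hypothetical config excerpts illustrating both; the values are invented for illustration:

# Older layout (Examples 10 and 11): name -> interval in seconds;
# a falsy value disables the task.
TASKS = {
    'GET_TOR_EXIT_LIST': 3 * 60 * 60,
    'DELETE_UNPARSEABLE_DESCRIPTORS': 24 * 60 * 60,
}

# Newer layout (Example 7): name -> (interval, startNow) pairs, where
# startNow controls whether the task also fires once at startup.
TASKS = {
    'GET_TOR_EXIT_LIST': (3 * 60 * 60, True),
    'DELETE_UNPARSEABLE_DESCRIPTORS': (24 * 60 * 60, False),
    'EXPORT_METRICS': (24 * 60 * 60, False),
}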
Example 11
def run(options, reactor=reactor):
    """This is BridgeDB's main entry point and main runtime loop.

    Given the parsed commandline options, this function handles locating the
    configuration file, loading and parsing it, and then either (re)parsing
    plus (re)starting the servers, or dumping bridge assignments to files.

    :type options: :class:`bridgedb.parse.options.MainOptions`
    :param options: A pre-parsed options class containing any arguments and
        options given in the commandline we were called with.
    :type state: :class:`bridgedb.persistent.State`
    :ivar state: A persistent state object which holds config changes.
    :param reactor: An implementer of
        :api:`twisted.internet.interfaces.IReactorCore`. This parameter is
        mainly for testing; the default
        :api:`twisted.internet.epollreactor.EPollReactor` is fine for normal
        application runs.
    """
    # Change to the directory where we're supposed to run. This must be done
    # before parsing the config file, otherwise there will need to be two
    # copies of the config file, one in the directory BridgeDB is started in,
    # and another in the directory it changes into.
    os.chdir(options['rundir'])
    if options['verbosity'] <= 10: # Corresponds to logging.DEBUG
        print("Changed to runtime directory %r" % os.getcwd())

    config = loadConfig(options['config'])
    config.RUN_IN_DIR = options['rundir']

    # Set up logging as early as possible. We cannot import from the bridgedb
    # package any of our modules which import :mod:`logging` and start using
    # it, at least, not until :func:`safelog.configureLogging` is
    # called. Otherwise a default handler that logs to the console will be
    # created by the imported module, and all further calls to
    # :func:`logging.basicConfig` will be ignored.
    util.configureLogging(config)

    if options.subCommand is not None:
        runSubcommand(options, config)

    # Write the pidfile only after any options.subCommands are run (because
    # these exit when they are finished). Otherwise, if there is a subcommand,
    # the real PIDFILE would get overwritten with the PID of the temporary
    # bridgedb process running the subcommand.
    if config.PIDFILE:
        logging.debug("Writing server PID to file: '%s'" % config.PIDFILE)
        with open(config.PIDFILE, 'w') as pidfile:
            pidfile.write("%s\n" % os.getpid())
            pidfile.flush()

    from bridgedb import persistent

    state = persistent.State(config=config)

    from bridgedb.distributors.email.server import addServer as addSMTPServer
    from bridgedb.distributors.https.server import addWebServer
    from bridgedb.distributors.moat.server  import addMoatServer

    # Load the master key, or create a new one.
    key = crypto.getKey(config.MASTER_KEY_FILE)
    proxies = proxy.ProxySet()
    emailDistributor = None
    ipDistributor = None
    moatDistributor = None

    # Save our state
    state.proxies = proxies
    state.key = key
    state.save()

    def reload(inThread=True): # pragma: no cover
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``hashring``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type hashring: A :class:`~bridgedb.Bridges.BridgeSplitter`
        :ivar hashring: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A
            :class:`~bridgedb.distributors.https.distributor.HTTPSDistributor`.
        :ivar emailDistributor: A
            :class:`~bridgedb.distributors.email.distributor.EmailDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.info("Reloading the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            logging.info("Loading proxies from: %s" % proxyfile)
            proxy.loadProxiesFromFile(proxyfile, state.proxies, removeStale=True)

        logging.info("Reparsing bridge descriptors...")
        (hashring,
         emailDistributorTmp,
         ipDistributorTmp,
         moatDistributorTmp) = createBridgeRings(cfg, state.proxies, key)
        logging.info("Bridges loaded: %d" % len(hashring))

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, hashring, clear=False)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings() # create default rings
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings() # create default rings
        else:
            logging.warn("No HTTP(S) distributor created!")

        if moatDistributorTmp is not None:
            moatDistributorTmp.prepopulateRings()
        else:
            logging.warn("No Moat distributor created!")

        # Dump bridge pool assignments to disk.
        writeAssignments(hashring, state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if moatDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       moatDistributor, moatDistributorTmp)
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       ipDistributor, ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings,
                                       emailDistributor, emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer namespace.
            return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp

    global _reloadFn
    _reloadFn = reload
    signal.signal(signal.SIGHUP, _handleSIGHUP)

    if reactor:  # pragma: no cover
        # And actually load it to start parsing. Get back our distributors.
        emailDistributor, ipDistributor, moatDistributor = reload(False)

        # Configure all servers:
        if config.MOAT_DIST and config.MOAT_SHARE:
            addMoatServer(config, moatDistributor)
        if config.HTTPS_DIST and config.HTTPS_SHARE:
            addWebServer(config, ipDistributor)
        if config.EMAIL_DIST and config.EMAIL_SHARE:
            addSMTPServer(config, emailDistributor)

        tasks = {}

        # Setup all our repeating tasks:
        if config.TASKS['GET_TOR_EXIT_LIST']:
            tasks['GET_TOR_EXIT_LIST'] = task.LoopingCall(
                proxy.downloadTorExits,
                state.proxies,
                config.SERVER_PUBLIC_EXTERNAL_IP)

        if config.TASKS.get('DELETE_UNPARSEABLE_DESCRIPTORS'):
            delUnparseableSecs = config.TASKS['DELETE_UNPARSEABLE_DESCRIPTORS']
        else:
            delUnparseableSecs = 24 * 60 * 60  # Default to 24 hours

        # We use the directory name of STATUS_FILE, since that directory
        # is where the *.unparseable descriptor files will be written to.
        tasks['DELETE_UNPARSEABLE_DESCRIPTORS'] = task.LoopingCall(
            runner.cleanupUnparseableDescriptors,
            os.path.dirname(config.STATUS_FILE), delUnparseableSecs)

        # Schedule all configured repeating tasks:
        for name, seconds in config.TASKS.items():
            if seconds:
                try:
                    # Set now to False to get the servers up and running when
                    # first started, rather than spend a bunch of time in
                    # scheduled tasks.
                    tasks[name].start(abs(seconds), now=False)
                except KeyError:
                    logging.info("Task %s is disabled and will not run." % name)
                else:
                    logging.info("Scheduled task %s to run every %s seconds."
                                 % (name, seconds))

    # Actually run the servers.
    try:
        if reactor and not reactor.running:
            logging.info("Starting reactors.")
            reactor.run()
    except KeyboardInterrupt: # pragma: no cover
        logging.fatal("Received keyboard interrupt. Shutting down...")
    finally:
        if config.PIDFILE:
            os.unlink(config.PIDFILE)
        logging.info("Exiting...")
        sys.exit()
Example 12
    def reload(inThread=True):
        """Reload settings, proxy lists, and bridges.

        State should be saved before calling this method, and will be saved
        again at the end of it.

        The internal variables, ``cfg``, ``splitter``, ``proxyList``,
        ``ipDistributor``, and ``emailDistributor`` are all taken from a
        :class:`~bridgedb.persistent.State` instance, which has been saved to
        a statefile with :meth:`bridgedb.persistent.State.save`.

        :type cfg: :class:`Conf`
        :ivar cfg: The current configuration, including any in-memory
            settings (i.e. settings whose values were not obtained from the
            config file, but were set via a function somewhere)
        :type splitter: A :class:`bridgedb.Bridges.BridgeHolder`
        :ivar splitter: A class which takes an HMAC key and splits bridges
            into their hashring assignments.
        :type proxyList: :class:`~bridgedb.proxy.ProxySet`
        :ivar proxyList: The container for the IP addresses of any currently
            known open proxies.
        :ivar ipDistributor: A :class:`Dist.IPBasedDistributor`.
        :ivar emailDistributor: A :class:`Dist.EmailBasedDistributor`.
        :ivar dict tasks: A dictionary of ``{name: task}``, where name is a
            string to associate with the ``task``, and ``task`` is some
            scheduled event, repetitive or otherwise, for the :class:`reactor
            <twisted.internet.epollreactor.EPollReactor>`. See the classes
            within the :api:`twisted.internet.tasks` module.
        """
        logging.debug("Caught SIGHUP")
        logging.info("Reloading...")

        logging.info("Loading saved state...")
        state = persistent.load()
        cfg = loadConfig(state.CONFIG_FILE, state.config)
        logging.info("Updating any changed settings...")
        state.useChangedSettings(cfg)

        level = getattr(state, 'LOGLEVEL', 'WARNING')
        logging.info("Updating log level to: '%s'" % level)
        level = getattr(logging, level)
        logging.getLogger().setLevel(level)

        logging.debug("Saving state again before reparsing descriptors...")
        state.save()
        logging.info("Reparsing bridge descriptors...")

        (splitter, emailDistributorTmp,
         ipDistributorTmp) = createBridgeRings(cfg, proxyList, key)

        # Initialize our DB.
        bridgedb.Storage.initializeDBLock()
        db = bridgedb.Storage.openDatabase(cfg.DB_FILE + ".sqlite")
        bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
        load(state, splitter, clear=False)

        state = persistent.load()
        logging.info("Bridges loaded: %d" % len(splitter))

        logging.debug("Replacing the list of open proxies...")
        for proxyfile in cfg.PROXY_LIST_FILES:
            proxy.loadProxiesFromFile(proxyfile,
                                      state.proxyList,
                                      removeStale=True)

        if emailDistributorTmp is not None:
            emailDistributorTmp.prepopulateRings()  # create default rings
            logging.info(
                "Bridges allotted for %s distribution: %d" %
                (emailDistributorTmp.name, len(emailDistributorTmp.splitter)))
        else:
            logging.warn("No email distributor created!")

        if ipDistributorTmp is not None:
            ipDistributorTmp.prepopulateRings()  # create default rings

            logging.info(
                "Bridges allotted for %s distribution: %d" %
                (ipDistributorTmp.name, len(ipDistributorTmp.splitter)))
            logging.info("\tNum bridges:\tFilter set:")

            nSubrings = 0
            ipSubrings = ipDistributorTmp.splitter.filterRings
            for (ringname, (filterFn, subring)) in ipSubrings.items():
                nSubrings += 1
                filterSet = ' '.join(
                    ipDistributorTmp.splitter.extractFilterNames(ringname))
                logging.info("\t%2d bridges\t%s" % (len(subring), filterSet))

            logging.info("Total subrings for %s: %d" %
                         (ipDistributorTmp.name, nSubrings))
        else:
            logging.warn("No HTTP(S) distributor created!")

        # Dump bridge pool assignments to disk.
        try:
            logging.debug("Dumping pool assignments to file: '%s'" %
                          state.ASSIGNMENTS_FILE)
            fh = open(state.ASSIGNMENTS_FILE, 'a')
            fh.write("bridge-pool-assignment %s\n" %
                     time.strftime("%Y-%m-%d %H:%M:%S"))
            splitter.dumpAssignments(fh)
            fh.flush()
            fh.close()
        except IOError:
            logging.info("I/O error while writing assignments to: '%s'" %
                         state.ASSIGNMENTS_FILE)
        state.save()

        if inThread:
            # XXX shutdown the distributors if they were previously running
            # and should now be disabled
            if ipDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, ipDistributor,
                                       ipDistributorTmp)
            if emailDistributorTmp:
                reactor.callFromThread(replaceBridgeRings, emailDistributor,
                                       emailDistributorTmp)
        else:
            # We're still starting up. Return these distributors so
            # they are configured in the outer namespace.
            return emailDistributorTmp, ipDistributorTmp
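
The assignment dump in this example (and in Example 10) opens, flushes, and closes the file by hand. A sketch of the same step using a context manager, which closes the handle even on error; equivalent behaviour, assuming the same interfaces:

import logging
import time

def dumpAssignments(hashring, filename):
    """Append a timestamped bridge-pool-assignment block to filename."""
    try:
        logging.debug("Dumping pool assignments to file: '%s'" % filename)
        with open(filename, 'a') as fh:
            fh.write("bridge-pool-assignment %s\n" %
                     time.strftime("%Y-%m-%d %H:%M:%S"))
            hashring.dumpAssignments(fh)
    except IOError:
        logging.info("I/O error while writing assignments to: '%s'" %
                     filename)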