def test_handouts(self):

    metrix = metrics.InternalMetrics()

    # Empty arguments must not be recorded.
    metrix.recordHandoutsPerBridge(None, None)
    self.assertEqual(len(metrix.hotMetrics), 0)

    req = HTTPSBridgeRequest()
    req.withIPversion({})
    req.isValid(True)
    bridges = copy.deepcopy(util.generateFakeBridges())

    # The median only shows up once we have handed out at least three
    # bridges.
    metrix.recordHandoutsPerBridge(req, [bridges[0]])
    self.assertNotIn("internal.handouts.median", metrix.hotMetrics.keys())
    metrix.recordHandoutsPerBridge(req, [bridges[1]])
    self.assertNotIn("internal.handouts.median", metrix.hotMetrics.keys())
    metrix.recordHandoutsPerBridge(req, [bridges[2]])
    self.assertEqual(metrix.hotMetrics["internal.handouts.median"], 1)

    # Hand out more bridges so that the per-bridge counts end up as 1, 2,
    # and 3.
    metrix.recordHandoutsPerBridge(req, [bridges[1]])
    metrix.recordHandoutsPerBridge(req, [bridges[2]])
    metrix.recordHandoutsPerBridge(req, [bridges[2]])
    self.assertEqual(metrix.hotMetrics["internal.handouts.min"], 1)
    self.assertEqual(metrix.hotMetrics["internal.handouts.median"], 2)
    self.assertEqual(metrix.hotMetrics["internal.handouts.max"], 3)
    self.assertEqual(metrix.hotMetrics["internal.handouts.unique-bridges"], 3)
    self.assertEqual(metrix.hotMetrics["internal.handouts.quartile1"], 1.5)
    self.assertEqual(metrix.hotMetrics["internal.handouts.quartile3"], 2.5)
    self.assertEqual(metrix.hotMetrics["internal.handouts.lower-whisker"], 1)
    self.assertEqual(metrix.hotMetrics["internal.handouts.upper-whisker"], 3)
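
# Illustrative sketch (not part of the test suite): the quartile and whisker
# assertions above follow from the per-bridge handout counts [1, 2, 3]. A
# hypothetical helper like the one below, using the standard library's
# "inclusive" quantile method, reproduces those numbers; the real
# InternalMetrics implementation may compute them differently.
import statistics

def summarize_handouts(counts):
    """Return (min, q1, median, q3, max) for a list of handout counts."""
    q1, median, q3 = statistics.quantiles(sorted(counts), n=4,
                                          method="inclusive")
    return min(counts), q1, median, q3, max(counts)

# summarize_handouts([1, 2, 3]) == (1, 1.5, 2.0, 2.5, 3)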
def test_bridge_handouts(self):

    metrix = metrics.InternalMetrics()
    bridges = copy.deepcopy(util.generateFakeBridges())
    bridge1, bridge2, bridge3 = bridges[0:3]
    m = metrix.hotMetrics

    br = HTTPSBridgeRequest()
    br.withIPversion({"ipv6": "4"})
    br.isValid(True)

    # Record a number of distribution events for three separate bridges.
    for i in range(10):
        metrix.recordHandoutsPerBridge(br, [bridge1])
    for i in range(5):
        metrix.recordHandoutsPerBridge(br, [bridge2])
    metrix.recordHandoutsPerBridge(br, [bridge3])

    self.assertEqual(m["internal.handouts.unique-bridges"], 3)
    self.assertEqual(m["internal.handouts.min"], 1)
    self.assertEqual(m["internal.handouts.max"], 10)
    self.assertEqual(m["internal.handouts.median"], 5)

    # Internal metrics must not be sanitized.
    metrix.rotate()
    lines = metrix.getMetrics()
    self.assertIn("internal.handouts.unique-bridges 3", lines)
    self.assertIn("internal.handouts.median 5", lines)
    self.assertIn("internal.handouts.min 1", lines)
    self.assertIn("internal.handouts.max 10", lines)
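
# Illustrative sketch: the assertIn checks above rely on getMetrics()
# returning plain "key value" strings. A hypothetical serializer consistent
# with that output (the actual formatting logic lives in bridgedb.metrics):
def serialize_metrics(hot_metrics):
    """Turn a {key: value} dict into a list of 'key value' lines."""
    return ["%s %s" % (key, value) for key, value in hot_metrics.items()]

# serialize_metrics({"internal.handouts.median": 5})
# == ["internal.handouts.median 5"]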
def test_metrics_reset(self):

    metrix = metrics.InternalMetrics()
    req = HTTPSBridgeRequest()
    req.withIPversion({})
    bridges = copy.deepcopy(util.generateFakeBridges())

    metrix.recordHandoutsPerBridge(req, [bridges[0]])
    self.assertTrue(len(metrix.bridgeHandouts) > 0)
    metrix.reset()
    self.assertTrue(len(metrix.bridgeHandouts) == 0)
def test_rings(self):

    metrix = metrics.InternalMetrics()

    # Empty parameters must not be recorded.
    metrix.recordBridgesInHashring("", "", 20)
    self.assertEqual(len(metrix.hotMetrics), 0)

    metrix.recordBridgesInHashring("https", "byIPv6-bySubring1of4", 20)
    self.assertEqual(len(metrix.hotMetrics), 1)
    self.assertEqual(list(metrix.hotMetrics.keys()),
                     ["internal.https.byipv6-bysubring1of4"])
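
# Illustrative sketch: the assertion above implies that recordBridgesInHashring
# lowercases the ring names and joins them into a dotted key. A hypothetical
# key builder consistent with that behaviour (the helper name is an
# assumption):
def hashring_key(distributor, subring_name):
    """Build a metrics key such as 'internal.https.byipv6-bysubring1of4'."""
    return "internal.%s.%s" % (distributor.lower(), subring_name.lower())

# hashring_key("https", "byIPv6-bySubring1of4")
# == "internal.https.byipv6-bysubring1of4"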
def test_empty_responses(self):

    metrix = metrics.InternalMetrics()

    # Unlike all other internal metrics, empty responses are sanitized.
    for i in range(10):
        metrix.recordEmptyEmailResponse()
    for i in range(11):
        metrix.recordEmptyMoatResponse()
    metrix.recordEmptyHTTPSResponse()

    metrix.rotate()
    lines = metrix.getMetrics()
    self.assertEqual(len(lines), 3)
    self.assertIn("internal.email.empty-response 10", lines)
    self.assertIn("internal.moat.empty-response 20", lines)
    self.assertIn("internal.https.empty-response 10", lines)
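
# Illustrative sketch: the assertions above are consistent with a sanitizer
# that rounds counts up to the next multiple of 10 (1 -> 10, 10 -> 10,
# 11 -> 20). The helper name below is hypothetical; BridgeDB's actual
# sanitization lives in bridgedb.metrics.
import math

def bin_count(value, bin_size=10):
    """Round a counter up to the next multiple of bin_size."""
    return int(math.ceil(value / bin_size) * bin_size)

# bin_count(1) == 10, bin_count(10) == 10, bin_count(11) == 20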
def setUp(self):
    self.topDir = os.getcwd().rstrip('_trial_temp')
    self.captchaDir = os.path.join(self.topDir, 'captchas')

    # Clear all singletons before each test to prevent cross-test
    # interference.
    type(metrics.HTTPSMetrics()).clear()
    type(metrics.EmailMetrics()).clear()
    type(metrics.MoatMetrics()).clear()
    type(metrics.InternalMetrics()).clear()

    metrics.setSupportedTransports({
        'obfs2': False,
        'obfs3': True,
        'obfs4': True,
        'scramblesuit': True,
        'fte': True,
    })

    self.metrix = metrics.HTTPSMetrics()
    self.key = self.metrix.createKey("https", "obfs4", "de", True, None)
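
# Illustrative sketch: the type(...).clear() calls above suggest that the
# metrics classes share a per-class singleton registry. A minimal metaclass
# with that shape (the names here are assumptions, not necessarily BridgeDB's
# actual implementation):
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Reuse the cached instance if one was already created.
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args,
                                                                 **kwargs)
        return cls._instances[cls]

    def clear(cls):
        # Drop the cached instance so the next call builds a fresh one,
        # which is what setUp() relies on to isolate tests.
        cls._instances.pop(cls, None)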
def test_ipv4_ipv6_requests(self):

    metrix = metrics.InternalMetrics()

    v6Req = HTTPSBridgeRequest()
    v6Req.withIPversion({"ipv6": "4"})
    v4Req = HTTPSBridgeRequest()
    v4Req.withIPversion({})
    bridges = copy.deepcopy(util.generateFakeBridges())

    for i in range(9):
        metrix.recordHandoutsPerBridge(v6Req, [bridges[0]])
    metrix.recordHandoutsPerBridge(v6Req, [bridges[1]])
    for i in range(11):
        metrix.recordHandoutsPerBridge(v4Req, [bridges[0]])

    metrix.rotate()
    lines = metrix.getMetrics()
    self.assertIn("internal.handouts.ipv6 10", lines)
    self.assertIn("internal.handouts.ipv4 20", lines)
def reload(inThread=True):  # pragma: no cover
    """Reload settings, proxy lists, and bridges.

    State should be saved before calling this method, and will be saved
    again at the end of it.

    The internal variables ``cfg`` and ``hashring`` are taken from a
    :class:`~bridgedb.persistent.State` instance, which has been saved to a
    statefile with :meth:`bridgedb.persistent.State.save`.

    :type cfg: :class:`Conf`
    :ivar cfg: The current configuration, including any in-memory settings
        (i.e. settings whose values were not obtained from the config file,
        but were set via a function somewhere)
    :type hashring: A :class:`~bridgedb.bridgerings.BridgeSplitter`
    :ivar hashring: A class which takes an HMAC key and splits bridges into
        their hashring assignments.
    """
    logging.debug("Caught SIGHUP")
    logging.info("Reloading...")

    logging.info("Loading saved state...")
    state = persistent.load()
    cfg = loadConfig(state.CONFIG_FILE, state.config)
    logging.info("Updating any changed settings...")
    state.useChangedSettings(cfg)

    level = getattr(state, 'LOGLEVEL', 'WARNING')
    logging.info("Updating log level to: '%s'" % level)
    level = getattr(logging, level)
    logging.getLogger().setLevel(level)

    logging.info("Reloading the list of open proxies...")
    for proxyfile in cfg.PROXY_LIST_FILES:
        logging.info("Loading proxies from: %s" % proxyfile)
        proxy.loadProxiesFromFile(proxyfile, proxies, removeStale=True)
    metrics.setProxies(proxies)

    state.BLACKLISTED_TOR_VERSIONS = parseVersionsList(
        state.BLACKLISTED_TOR_VERSIONS)

    logging.info("Reloading blacklisted request headers...")
    antibot.loadBlacklistedRequestHeaders(
        config.BLACKLISTED_REQUEST_HEADERS_FILE)
    logging.info("Reloading decoy bridges...")
    antibot.loadDecoyBridges(config.DECOY_BRIDGES_FILE)

    (hashring, emailDistributorTmp, ipDistributorTmp,
     moatDistributorTmp) = createBridgeRings(cfg, proxies, key)

    # Initialize our DB.
    bridgedb.Storage.initializeDBLock()
    bridgedb.Storage.setDBFilename(cfg.DB_FILE + ".sqlite")
    logging.info("Reparsing bridge descriptors...")
    load(state, hashring, clear=False)
    logging.info("Bridges loaded: %d" % len(hashring))
    loadBlockedBridges(hashring)

    if emailDistributorTmp is not None:
        emailDistributorTmp.prepopulateRings()  # create default rings
    else:
        logging.warn("No email distributor created!")

    if ipDistributorTmp is not None:
        ipDistributorTmp.prepopulateRings()  # create default rings
    else:
        logging.warn("No HTTP(S) distributor created!")

    if moatDistributorTmp is not None:
        moatDistributorTmp.prepopulateRings()
    else:
        logging.warn("No Moat distributor created!")

    # Record how many bridges each of our hashrings contains.
    metrix = metrics.InternalMetrics()
    logging.info("Logging bridge ring metrics for %d rings." %
                 len(hashring.ringsByName))
    for ringName, ring in hashring.ringsByName.items():
        # Ring is of type FilteredBridgeSplitter or UnallocatedHolder.
        # FilteredBridgeSplitter splits bridges into subhashrings based on
        # filters.
        if hasattr(ring, "filterRings"):
            for (ringname, (filterFn, subring)) in ring.filterRings.items():
                subRingName = "-".join(ring.extractFilterNames(ringname))
                metrix.recordBridgesInHashring(ringName,
                                               subRingName,
                                               len(subring))
        elif hasattr(ring, "fingerprints"):
            metrix.recordBridgesInHashring(ringName,
                                           "unallocated",
                                           len(ring.fingerprints))

    # Dump bridge pool assignments to disk.
    writeAssignments(hashring, state.ASSIGNMENTS_FILE)
    state.save()

    if inThread:
        # XXX shutdown the distributors if they were previously running
        # and should now be disabled
        if moatDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   moatDistributor, moatDistributorTmp)
        if ipDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   ipDistributor, ipDistributorTmp)
        if emailDistributorTmp:
            reactor.callFromThread(replaceBridgeRings,
                                   emailDistributor, emailDistributorTmp)
    else:
        # We're still starting up. Return these distributors so
        # they are configured in the outer-namespace
        return emailDistributorTmp, ipDistributorTmp, moatDistributorTmp
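
# Illustrative sketch: the "Caught SIGHUP" log line above suggests reload()
# is wired up as a SIGHUP handler. One way that hook-up could look (the
# handler name and call site are assumptions):
import signal

def _handleSIGHUP(signum, frame):
    """Reload bridges and settings when we receive a SIGHUP."""
    reactor.callInThread(reload, True)

# signal.signal(signal.SIGHUP, _handleSIGHUP)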
from bridgedb import captcha
from bridgedb import crypto
from bridgedb import antibot
from bridgedb import metrics
from bridgedb.distributors.common.http import setFQDN
from bridgedb.distributors.common.http import getFQDN
from bridgedb.distributors.common.http import getClientIP
from bridgedb.distributors.moat.request import MoatBridgeRequest
from bridgedb.qrcodes import generateQR
from bridgedb.schedule import Unscheduled
from bridgedb.schedule import ScheduledInterval
from bridgedb.util import replaceControlChars

# We use our metrics singletons to keep track of BridgeDB metrics such as
# "number of failed HTTPS bridge requests."
moatMetrix = metrics.MoatMetrics()
internalMetrix = metrics.InternalMetrics()

#: The current version of the moat JSON API that we speak
MOAT_API_VERSION = '0.1.0'

#: The root path to resources for the moat server
SERVER_PUBLIC_ROOT = None

#: An ordered list of the preferred transports which moat should
#: distribute, in order from most preferable to least preferable.
TRANSPORT_PREFERENCE_LIST = None

#: All of the pluggable transports BridgeDB currently supports.
SUPPORTED_TRANSPORTS = None
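
# Illustrative sketch: request handlers in this module can record events on
# the shared singletons above. A hypothetical helper showing the intended
# use (recordEmptyMoatResponse is the method exercised by the metrics tests):
def _recordEmptyResponse():
    """Note that we are about to hand out an empty list of bridges."""
    internalMetrix.recordEmptyMoatResponse()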