class KeyGeneratorService(service.MultiService):
    furl_file = 'key_generator.furl'

    def __init__(self, basedir='.', display_furl=True, default_key_size=2048):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.tub = Tub(certFile=os.path.join(self.basedir, 'key_generator.pem'))
        self.tub.setOption("expose-remote-exception-types", False)
        self.tub.setServiceParent(self)
        self.key_generator = KeyGenerator(default_key_size=default_key_size)
        self.key_generator.setServiceParent(self)

        portnum = self.get_portnum()
        self.listener = self.tub.listenOn(portnum or 'tcp:0')
        d = self.tub.setLocationAutomatically()
        if portnum is None:
            d.addCallback(self.save_portnum)
        d.addCallback(self.tub_ready, display_furl)
        d.addErrback(log.err)

    def get_portnum(self):
        portnumfile = os.path.join(self.basedir, 'portnum')
        if os.path.exists(portnumfile):
            return file(portnumfile, 'rb').read().strip()

    def save_portnum(self, junk):
        portnum = self.listener.getPortnum()
        portnumfile = os.path.join(self.basedir, 'portnum')
        file(portnumfile, 'wb').write('%d\n' % (portnum,))

    def tub_ready(self, junk, display_furl):
        kgf = os.path.join(self.basedir, self.furl_file)
        self.keygen_furl = self.tub.registerReference(self.key_generator,
                                                      furlFile=kgf)
        if display_furl:
            print 'key generator at:', self.keygen_furl
def test_failure(self):
    self.basedir = "introducer/NonV1Server/failure"
    os.makedirs(self.basedir)
    self.create_tub()
    i = TooNewServer()
    i.setServiceParent(self.parent)
    self.introducer_furl = self.central_tub.registerReference(i)

    tub = Tub()
    tub.setOption("expose-remote-exception-types", False)
    tub.setServiceParent(self.parent)
    l = tub.listenOn("tcp:0")
    portnum = l.getPortnum()
    tub.setLocation("localhost:%d" % portnum)

    c = IntroducerClient(tub, self.introducer_furl,
                         u"nickname-client", "version", "oldest")
    announcements = {}
    def got(serverid, ann_d):
        announcements[serverid] = ann_d
    c.subscribe_to("storage", got)

    c.setServiceParent(self.parent)

    # now we wait for it to connect and notice the bad version
    def _got_bad():
        return bool(c._introducer_error) or bool(c._publisher)
    d = self.poll(_got_bad)
    def _done(res):
        self.failUnless(c._introducer_error)
        self.failUnless(c._introducer_error.check(InsufficientVersionError))
    d.addCallback(_done)
    return d
def create_tub(tub_options, default_connection_handlers,
               foolscap_connection_handlers, handler_overrides={}, **kwargs):
    """
    Create a Tub with the right options and handlers. It will be
    ephemeral unless the caller provides certFile= in kwargs

    :param handler_overrides: anything in this will override anything
        in `default_connection_handlers` for just this call.

    :param dict tub_options: every key-value pair in here will be set in
        the new Tub via `Tub.setOption`
    """
    tub = Tub(**kwargs)
    for (name, value) in list(tub_options.items()):
        tub.setOption(name, value)
    handlers = default_connection_handlers.copy()
    handlers.update(handler_overrides)
    tub.removeAllConnectionHintHandlers()
    for hint_type, handler_name in list(handlers.items()):
        handler = foolscap_connection_handlers.get(handler_name)
        if handler:
            tub.addConnectionHintHandler(hint_type, handler)
    return tub
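# A usage sketch for the factory above (illustrative, not from the original
# source). The option names follow Foolscap conventions, and
# foolscap.connections.tcp.default() is assumed to be the stock TCP hint
# handler; omit certFile= to get an ephemeral Tub.
from foolscap.connections import tcp

tub_options = {
    "expose-remote-exception-types": False,  # wrap remote failures in RemoteException
    "logLocalFailures": True,
}
default_connection_handlers = {"tcp": "tcp"}           # hint type -> handler name
foolscap_connection_handlers = {"tcp": tcp.default()}  # handler name -> handler object

tub = create_tub(tub_options, default_connection_handlers,
                 foolscap_connection_handlers,
                 certFile="my_tub.pem")  # hypothetical path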
def test_failure(self):
    self.basedir = "introducer/NonV1Server/failure"
    os.makedirs(self.basedir)
    self.create_tub()
    i = TooNewServer()
    i.setServiceParent(self.parent)
    self.introducer_furl = self.central_tub.registerReference(i)

    tub = Tub()
    tub.setOption("expose-remote-exception-types", False)
    tub.setServiceParent(self.parent)
    listenOnUnused(tub)
    c = IntroducerClient(tub, self.introducer_furl,
                         u"nickname-client", "version", "oldest", fakeseq,
                         FilePath(self.mktemp()))
    announcements = {}
    def got(key_s, ann):
        announcements[key_s] = ann
    c.subscribe_to("storage", got)

    c.setServiceParent(self.parent)

    # now we wait for it to connect and notice the bad version
    def _got_bad():
        return bool(c._introducer_error) or bool(c._publisher)
    d = self.poll(_got_bad)
    def _done(res):
        self.failUnless(c._introducer_error)
        self.failUnless(c._introducer_error.check(InsufficientVersionError),
                        c._introducer_error)
    d.addCallback(_done)
    return d
def create_tub(self, basedir):
    os.makedirs(basedir)
    tubfile = os.path.join(basedir, "tub.pem")
    tub = Tub(certFile=tubfile)
    tub.setOption("expose-remote-exception-types", False)
    tub.startService()
    self.addCleanup(tub.stopService)
    return tub
def test_logport_furlfile1(self):
    basedir = "unreachable/References/logport_furlfile1"
    os.makedirs(basedir)
    furlfile = os.path.join(basedir, "logport.furl")
    t = Tub()
    # setOption before setServiceParent
    t.setOption("logport-furlfile", furlfile)
    t.setServiceParent(self.s)
    self.assertRaises(NoLocationError, t.getLogPort)
    self.assertRaises(NoLocationError, t.getLogPortFURL)
    self.assertFalse(os.path.exists(furlfile))
def test_logport_furlfile1(self):
    basedir = "unreachable/References/logport_furlfile1"
    os.makedirs(basedir)
    furlfile = os.path.join(basedir, "logport.furl")
    t = Tub()
    # setOption before setServiceParent
    t.setOption("logport-furlfile", furlfile)
    t.setServiceParent(self.s)
    self.failUnlessRaises(NoLocationError, t.getLogPort)
    self.failUnlessRaises(NoLocationError, t.getLogPortFURL)
    self.failIf(os.path.exists(furlfile))
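# For contrast with the two tests above, a minimal sketch (assumed, not part
# of the original suite) of the sequence that makes the logport usable: once
# the Tub is listening and setLocation() has been called, getLogPortFURL()
# succeeds and the "logport-furlfile" target gets written.
from foolscap.api import Tub

t = Tub()
t.setOption("logport-furlfile", "logport.furl")  # hypothetical path
t.startService()
l = t.listenOn("tcp:0:interface=127.0.0.1")
t.setLocation("127.0.0.1:%d" % l.getPortnum())
print(t.getLogPortFURL())  # no NoLocationError now; the furlfile exists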
def _create_tub(self, handler_overrides={}, **kwargs):
    # Create a Tub with the right options and handlers. It will be
    # ephemeral unless the caller provides certFile=
    tub = Tub(**kwargs)
    for (name, value) in self.tub_options.items():
        tub.setOption(name, value)
    handlers = self._default_connection_handlers.copy()
    handlers.update(handler_overrides)
    tub.removeAllConnectionHintHandlers()
    for hint_type, handler_name in handlers.items():
        handler = self._foolscap_connection_handlers.get(handler_name)
        if handler:
            tub.addConnectionHintHandler(hint_type, handler)
    return tub
class TubFailures(ExamineFailuresMixin, ShouldFailMixin, unittest.TestCase):
    def setUp(self):
        self.s = service.MultiService()
        self.s.startService()
        self.target_tub = Tub()
        self.target_tub.setServiceParent(self.s)
        l = self.target_tub.listenOn("tcp:0:interface=127.0.0.1")
        self.target_tub.setLocation("127.0.0.1:%d" % l.getPortnum())
        self.source_tub = Tub()
        self.source_tub.setServiceParent(self.s)

    def tearDown(self):
        return self.s.stopService()

    def setupTarget(self, target):
        furl = self.target_tub.registerReference(target)
        d = self.source_tub.getReference(furl)
        return d

    def test_raise_not_exposed(self):
        self.source_tub.setOption("expose-remote-exception-types", False)
        d = self.setupTarget(TargetWithoutInterfaces())
        d.addCallback(lambda rr:
                      self.shouldFail(RemoteException, "one", None,
                                      rr.callRemote, "fail"))
        d.addCallback(self._examine_raise, True)
        return d

    def test_raise_yes_exposed(self):
        self.source_tub.setOption("expose-remote-exception-types", True)
        d = self.setupTarget(TargetWithoutInterfaces())
        d.addCallback(lambda rr:
                      self.shouldFail(ValueError, "one", None,
                                      rr.callRemote, "fail"))
        d.addCallback(self._examine_raise, False)
        return d

    def test_raise_default(self):
        # current default is to expose exceptions. This may change in the
        # future.
        d = self.setupTarget(TargetWithoutInterfaces())
        d.addCallback(lambda rr:
                      self.shouldFail(ValueError, "one", None,
                                      rr.callRemote, "fail"))
        d.addCallback(self._examine_raise, False)
        return d
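# The behavior those three tests pin down, in isolation (a sketch, not part
# of the original suite): with "expose-remote-exception-types" set to False
# on the *calling* Tub, a remote ValueError surfaces locally as
# foolscap.api.RemoteException; the original failure is assumed to be
# available as the wrapper's .failure attribute.
from foolscap.api import RemoteException

def call_fail(rref):
    d = rref.callRemote("fail")   # remote side raises ValueError
    def _trap(f):
        f.trap(RemoteException)   # the local wrapper type
        return f.value.failure    # the remote ValueError, as a Failure
    d.addErrback(_trap)
    return d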
def makeService(config, reactor=reactor):
    parent = MultiService()
    basedir = FilePath(os.path.expanduser(config["basedir"]))
    basedir.makedirs(ignoreExistingDirectory=True)
    basedir.chmod(0o700)
    data = Data(basedir.child("config.json"))

    dns_server = DNSServerFactory(verbose=0)
    s1 = UDPServer(int(config["dns-port"]), dns.DNSDatagramProtocol(dns_server),
                   interface=config["dns-interface"])
    s1.setServiceParent(parent)
    s2 = TCPServer(int(config["dns-port"]), dns_server,
                   interface=config["dns-interface"])
    s2.setServiceParent(parent)

    s = Server(data, dns_server)
    s.update_records()

    certFile = basedir.child("tub.data").path
    #furlFile = basedir.child("server.furl").path
    t = Tub(certFile=certFile)
    t.setOption("keepaliveTimeout", 60)  # ping after 60s of idle
    t.setOption("disconnectTimeout", 5*60)  # disconnect/reconnect after 5m
    #t.setOption("logLocalFailures", True)
    #t.setOption("logRemoteFailures", True)
    #t.unsafeTracebacks = True
    fp = config["foolscap-port"]
    if not fp.startswith("tcp:"):
        raise usage.UsageError("I don't know how to handle non-tcp foolscap-port=")
    port = int(fp.split(":")[1])
    assert port > 1
    t.listenOn(fp)
    t.setLocation("tcp:%s:%d" % (config["hostname"], port))

    c = Controller(data, s)
    cf = t.registerReference(c, furlFile=basedir.child("controller.furl").path)
    furl_prefix = cf[:cf.rfind("/")+1]
    c.set_furl_prefix(furl_prefix)
    t.registerNameLookupHandler(c.lookup)
    t.setServiceParent(parent)

    return parent
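# The dynamic-name hook used above, in isolation (an assumed sketch; the real
# Controller.lookup is not shown here): a function registered with
# registerNameLookupHandler() is called with the object name from an incoming
# FURL whenever no statically-registered reference matches, and returns a
# Referenceable to serve, or None to reject the name.
my_objects = {}  # hypothetical name -> Referenceable table

def lookup(name):
    return my_objects.get(name)  # None means "unknown name"

t.registerNameLookupHandler(lookup)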
def makeService(config, reactor=reactor):
    parent = MultiService()
    basedir = FilePath(os.path.expanduser(config["basedir"]))
    basedir.makedirs(ignoreExistingDirectory=True)
    basedir.chmod(0o700)
    data = Data(basedir.child("config.json"))

    certFile = basedir.child("tub.data").path
    tub = Tub(certFile=certFile)
    tub.setOption("keepaliveTimeout", 60)  # ping after 60s of idle
    tub.setOption("disconnectTimeout", 5*60)  # disconnect/reconnect after 5m
    tub.listenOn("tcp:6319:interface=127.0.0.1")
    tub.setLocation("tcp:127.0.0.1:6319")
    tub.setServiceParent(parent)

    acme_path = basedir.asTextMode()
    acme_key = maybe_key(acme_path)
    cert_store = FlancerCertificateStore(data, basedir)
    staging = not config["really"]
    if staging:
        print("STAGING mode")
        le_url = LETSENCRYPT_STAGING_DIRECTORY
    else:
        print("REAL CERTIFICATE mode")
        le_url = LETSENCRYPT_DIRECTORY
    client_creator = partial(Client.from_url, reactor=reactor, url=le_url,
                             key=acme_key, alg=RS256)
    r = FlancerResponder(tub, data)
    issuer = AcmeIssuingService(cert_store, client_creator, reactor, [r])
    issuer.setServiceParent(parent)

    if "dyndns_furl" in data:
        start_dyndns_canary(tub, data["dyndns_furl"].encode("ascii"))

    c = Controller(tub, data, issuer)
    tub.registerReference(c, furlFile=basedir.child("controller.furl").path)

    #TimerService(5*60.0, f.timerUpdateStats).setServiceParent(parent)

    return parent
class StatsGathererService(service.MultiService):
    furl_file = "stats_gatherer.furl"

    def __init__(self, basedir=".", verbose=False):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.tub = Tub(certFile=os.path.join(self.basedir,
                                             "stats_gatherer.pem"))
        self.tub.setServiceParent(self)
        self.tub.setOption("logLocalFailures", True)
        self.tub.setOption("logRemoteFailures", True)
        self.tub.setOption("expose-remote-exception-types", False)

        self.stats_gatherer = JSONStatsGatherer(self.basedir, verbose)
        self.stats_gatherer.setServiceParent(self)

        try:
            with open(os.path.join(self.basedir, "location")) as f:
                location = f.read().strip()
        except EnvironmentError:
            raise ValueError("Unable to find 'location' in BASEDIR, "
                             "please rebuild your stats-gatherer")
        try:
            with open(os.path.join(self.basedir, "port")) as f:
                port = f.read().strip()
        except EnvironmentError:
            raise ValueError("Unable to find 'port' in BASEDIR, "
                             "please rebuild your stats-gatherer")
        self.tub.listenOn(port)
        self.tub.setLocation(location)
        ff = os.path.join(self.basedir, self.furl_file)
        self.gatherer_furl = self.tub.registerReference(self.stats_gatherer,
                                                        furlFile=ff)
class KeyGeneratorService(service.MultiService):
    furl_file = 'key_generator.furl'

    def __init__(self, basedir='.', display_furl=True, default_key_size=2048):
        service.MultiService.__init__(self)
        self.basedir = basedir
        fileutil.make_dirs(self.basedir)
        self.tub = Tub(certFile=os.path.join(self.basedir, 'key_generator.pem'))
        self.tub.setOption("expose-remote-exception-types", False)
        self.tub.setServiceParent(self)
        self.key_generator = KeyGenerator(default_key_size=default_key_size)
        self.key_generator.setServiceParent(self)

        portnum = self.get_portnum()
        self.listener = self.tub.listenOn(portnum or 'tcp:0')
        d = self.tub.setLocationAutomatically()
        if portnum is None:
            d.addCallback(self.save_portnum)
        d.addCallback(self.tub_ready, display_furl)
        d.addErrback(log.err)

    def get_portnum(self):
        portnumfile = os.path.join(self.basedir, 'portnum')
        if os.path.exists(portnumfile):
            return file(portnumfile, 'rb').read().strip()

    def save_portnum(self, junk):
        portnum = self.listener.getPortnum()
        portnumfile = os.path.join(self.basedir, 'portnum')
        file(portnumfile, 'wb').write('%d\n' % (portnum,))

    def tub_ready(self, junk, display_furl):
        kgf = os.path.join(self.basedir, self.furl_file)
        self.keygen_furl = self.tub.registerReference(self.key_generator,
                                                      furlFile=kgf)
        if display_furl:
            print 'key generator at:', self.keygen_furl
def create_tub(tub_options, default_connection_handlers,
               foolscap_connection_handlers, handler_overrides={}, **kwargs):
    """
    Create a Tub with the right options and handlers. It will be
    ephemeral unless the caller provides certFile= in kwargs

    :param handler_overrides: anything in this will override anything
        in `default_connection_handlers` for just this call.

    :param dict tub_options: every key-value pair in here will be set in
        the new Tub via `Tub.setOption`
    """
    tub = Tub(**kwargs)
    for (name, value) in tub_options.items():
        tub.setOption(name, value)
    handlers = default_connection_handlers.copy()
    handlers.update(handler_overrides)
    tub.removeAllConnectionHintHandlers()
    for hint_type, handler_name in handlers.items():
        handler = foolscap_connection_handlers.get(handler_name)
        if handler:
            tub.addConnectionHintHandler(hint_type, handler)
    return tub
class StatsGathererService(service.MultiService):
    furl_file = "stats_gatherer.furl"

    def __init__(self, basedir=".", verbose=False):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.tub = Tub(certFile=os.path.join(self.basedir,
                                             "stats_gatherer.pem"))
        self.tub.setServiceParent(self)
        self.tub.setOption("logLocalFailures", True)
        self.tub.setOption("logRemoteFailures", True)
        self.tub.setOption("expose-remote-exception-types", False)

        self.stats_gatherer = PickleStatsGatherer(self.basedir, verbose)
        self.stats_gatherer.setServiceParent(self)

        portnumfile = os.path.join(self.basedir, "portnum")
        try:
            portnum = open(portnumfile, "r").read()
        except EnvironmentError:
            portnum = None
        self.listener = self.tub.listenOn(portnum or "tcp:0")
        d = self.tub.setLocationAutomatically()
        if portnum is None:
            d.addCallback(self.save_portnum)
        d.addCallback(self.tub_ready)
        d.addErrback(log.err)

    def save_portnum(self, junk):
        portnum = self.listener.getPortnum()
        portnumfile = os.path.join(self.basedir, 'portnum')
        open(portnumfile, 'wb').write('%d\n' % (portnum,))

    def tub_ready(self, ignored):
        ff = os.path.join(self.basedir, self.furl_file)
        self.gatherer_furl = self.tub.registerReference(self.stats_gatherer,
                                                        furlFile=ff)
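# The port-persistence idiom shared by the gatherer and key-generator services
# above, reduced to its core (an illustrative sketch): listen on a previously
# saved port if one exists, otherwise on an ephemeral "tcp:0", and record
# whichever port was assigned so the Tub's location hints (and thus the FURLs
# it hands out) stay stable across restarts.
def listen_with_saved_port(tub, portnumfile):
    try:
        portnum = open(portnumfile, "r").read().strip()
    except EnvironmentError:
        portnum = None
    listener = tub.listenOn(portnum or "tcp:0")
    d = tub.setLocationAutomatically()
    if portnum is None:
        # first run: write the file only after the kernel assigns a port
        d.addCallback(lambda ign: open(portnumfile, "w")
                      .write("%d\n" % listener.getPortnum()))
    return d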
class ReferenceCounting(ShouldFailMixin, unittest.TestCase):
    def setUp(self):
        self.s = service.MultiService()
        self.s.startService()
        self.target_tub = Tub()
        self.target_tub.setServiceParent(self.s)
        l = self.target_tub.listenOn("tcp:0:interface=127.0.0.1")
        self.target_tub.setLocation("127.0.0.1:%d" % l.getPortnum())
        self.source_tub = Tub()
        self.source_tub.setServiceParent(self.s)

    def tearDown(self):
        return self.s.stopService()

    def setupTarget(self, target):
        furl = self.target_tub.registerReference(target)
        d = self.source_tub.getReference(furl)
        return d

    def test_reference_counting(self):
        self.source_tub.setOption("expose-remote-exception-types", True)
        target = HelperTarget()
        d = self.setupTarget(target)
        def _stash(rref):
            # to exercise bug #104, we need to trigger remote Violations, so
            # we tell the sending side to not use a RemoteInterface. We do
            # this by reaching inside the RemoteReference and making it
            # forget
            rref.tracker.interfaceName = None
            rref.tracker.interface = None
            self.rref = rref
        d.addCallback(_stash)

        # the first call causes an error, which discards all remaining
        # tokens, including the OPEN tokens for the arguments. The #104 bug
        # is that this causes the open-count to get out of sync, by -2 (one
        # for the arguments sequence, one for the list inside it).
        d.addCallback(lambda ign:
                      self.shouldFail(Violation, "one", None,
                                      self.rref.callRemote, "bogus",
                                      ["one list"]))

        #d.addCallback(lambda ign:
        #              self.rref.callRemote("set", ["one list"]))

        # a method call that has no arguments (specifically no REFERENCE
        # sequences) won't notice the loss of sync
        d.addCallback(lambda ign: self.rref.callRemote("set", 42))
        def _check_42(ign):
            self.failUnlessEqual(target.obj, 42)
        d.addCallback(_check_42)
        # but when the call takes shared arguments, sync matters
        l = ["list", 1, 2]
        s = set([3,4])
        t = ("tuple", 5, 6)
        d.addCallback(lambda ign: self.rref.callRemote("set", [t, l, s, t]))
        def _check_shared(ign):
            # the off-by-two bug would cause the second tuple shared-ref to
            # point at the set instead of the first tuple
            self.failUnlessEqual(type(target.obj), list)
            one, two, three, four = target.obj
            self.failUnlessEqual(type(one), tuple)
            self.failUnlessEqual(one, t)
            self.failUnlessEqual(type(two), list)
            self.failUnlessEqual(two, l)
            self.failUnlessEqual(type(three), set)
            self.failUnlessEqual(three, s)
            self.failUnlessEqual(type(four), tuple)  # this is where it fails
            self.failUnlessEqual(four, t)
            self.failUnlessIdentical(one, four)
        d.addCallback(_check_shared)
        return d
def do_system_test(self):
    self.create_tub()
    introducer = IntroducerService()
    introducer.setServiceParent(self.parent)
    iff = os.path.join(self.basedir, "introducer.furl")
    tub = self.central_tub
    ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
    self.introducer_furl = ifurl

    # we have 5 clients who publish themselves as storage servers, and a
    # sixth which does not. All 6 clients subscribe to hear about
    # storage. When the connections are fully established, all six nodes
    # should have 5 connections each.
    NUM_STORAGE = 5
    NUM_CLIENTS = 6

    clients = []
    tubs = {}
    received_announcements = {}
    subscribing_clients = []
    publishing_clients = []
    printable_serverids = {}
    self.the_introducer = introducer
    privkeys = {}
    pubkeys = {}
    expected_announcements = [0 for c in range(NUM_CLIENTS)]

    for i in range(NUM_CLIENTS):
        tub = Tub()
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        listenOnUnused(tub)
        log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
        c = IntroducerClient(tub, self.introducer_furl,
                             NICKNAME % str(i), "version", "oldest",
                             {"component": "component-v1"}, fakeseq,
                             FilePath(self.mktemp()))
        received_announcements[c] = {}
        def got(key_s_or_tubid, ann, announcements):
            index = key_s_or_tubid or get_tubid_string_from_ann(ann)
            announcements[index] = ann
        c.subscribe_to("storage", got, received_announcements[c])
        subscribing_clients.append(c)
        expected_announcements[i] += 1  # all expect a 'storage' announcement

        node_furl = tub.registerReference(Referenceable())
        privkey_s, pubkey_s = keyutil.make_keypair()
        privkey, _ignored = keyutil.parse_privkey(privkey_s)
        privkeys[i] = privkey
        pubkeys[i] = pubkey_s

        if i < NUM_STORAGE:
            # sign all announcements
            c.publish("storage", make_ann(node_furl), privkey)
            assert pubkey_s.startswith("pub-")
            printable_serverids[i] = pubkey_s[len("pub-"):]
            publishing_clients.append(c)
        else:
            # the last one does not publish anything
            pass

        if i == 2:
            # also publish something that nobody cares about
            boring_furl = tub.registerReference(Referenceable())
            c.publish("boring", make_ann(boring_furl), privkey)

        c.setServiceParent(self.parent)
        clients.append(c)
        tubs[c] = tub

    def _wait_for_connected(ign):
        def _connected():
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            return True
        return self.poll(_connected)

    # we watch the clients to determine when the system has settled down.
    # Then we can look inside the server to assert things about its
    # state.
    def _wait_for_expected_announcements(ign):
        def _got_expected_announcements():
            for i, c in enumerate(subscribing_clients):
                if len(received_announcements[c]) < expected_announcements[i]:
                    return False
            return True
        return self.poll(_got_expected_announcements)

    # before shutting down any Tub, we'd like to know that there are no
    # messages outstanding
    def _wait_until_idle(ign):
        def _idle():
            for c in subscribing_clients + publishing_clients:
                if c._debug_outstanding:
                    return False
            if self.the_introducer._debug_outstanding:
                return False
            return True
        return self.poll(_idle)

    d = defer.succeed(None)
    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)

    def _check1(res):
        log.msg("doing _check1")
        dc = self.the_introducer._debug_counts
        # each storage server publishes a record. There is also one
        # "boring"
        self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE + 1)
        self.failUnlessEqual(dc["inbound_duplicate"], 0)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        # the number of outbound messages is tricky.. I think it depends
        # upon a race between the publish and the subscribe messages.
        self.failUnless(dc["outbound_message"] > 0)
        # each client subscribes to "storage", and each server publishes
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE * NUM_CLIENTS)

        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"])
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], 0)
            self.failUnlessEqual(cdc["update"], 0)
            self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
            anns = received_announcements[c]
            self.failUnlessEqual(len(anns), NUM_STORAGE)

            serverid0 = printable_serverids[0]
            ann = anns[serverid0]
            nick = ann["nickname"]
            self.failUnlessEqual(type(nick), unicode)
            self.failUnlessEqual(nick, NICKNAME % "0")
        for c in publishing_clients:
            cdc = c._debug_counts
            expected = 1
            if c in [clients[2],  # boring
                     ]:
                expected = 2
            self.failUnlessEqual(cdc["outbound_message"], expected)
        # now check the web status, make sure it renders without error
        ir = introweb.IntroducerRoot(self.parent)
        self.parent.nodeid = "NODEID"
        text = ir.renderSynchronously().decode("utf-8")
        self.failUnlessIn(NICKNAME % "0", text)  # a v2 client
        self.failUnlessIn(NICKNAME % "1", text)  # another v2 client
        for i in range(NUM_STORAGE):
            self.failUnlessIn(printable_serverids[i], text,
                              (i, printable_serverids[i], text))
            # make sure there isn't a double-base32ed string too
            self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                          (i, printable_serverids[i], text))
        log.msg("_check1 done")
    d.addCallback(_check1)

    # force an introducer reconnect, by shutting down the Tub it's using
    # and starting a new Tub (with the old introducer). Everybody should
    # reconnect and republish, but the introducer should ignore the
    # republishes as duplicates. However, because the server doesn't know
    # what each client does and does not know, it will send them a copy
    # of the current announcement table anyway.

    d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

    def _wait_for_introducer_loss(ign):
        def _introducer_lost():
            for c in clients:
                if c.connected_to_introducer():
                    return False
            return True
        return self.poll(_introducer_lost)
    d.addCallback(_wait_for_introducer_loss)

    def _restart_introducer_tub(_ign):
        log.msg("restarting introducer's Tub")
        # reset counters
        for i in range(NUM_CLIENTS):
            c = subscribing_clients[i]
            for k in c._debug_counts:
                c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
        self.create_tub(self.central_portnum)
        newfurl = self.central_tub.registerReference(self.the_introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer_tub)

    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)
    d.addCallback(lambda _ign: log.msg(" reconnected"))

    # TODO: publish something while the introducer is offline, then
    # confirm it gets delivered when the connection is reestablished
    def _check2(res):
        log.msg("doing _check2")
        # assert that the introducer sent out new messages, one per
        # subscriber
        dc = self.the_introducer._debug_counts
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE * NUM_CLIENTS)
        self.failUnless(dc["outbound_message"] > 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["inbound_message"], 1)
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["new_announcement"], 0)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
    d.addCallback(_check2)

    # Then force an introducer restart, by shutting down the Tub,
    # destroying the old introducer, and starting a new Tub+Introducer.
    # Everybody should reconnect and republish, and the (new) introducer
    # will distribute the new announcements, but the clients should
    # ignore the republishes as duplicates.

    d.addCallback(lambda _ign: log.msg("shutting down introducer"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
    d.addCallback(_wait_for_introducer_loss)
    d.addCallback(lambda _ign: log.msg("introducer lost"))

    def _restart_introducer(_ign):
        log.msg("restarting introducer")
        self.create_tub(self.central_portnum)
        # reset counters
        for i in range(NUM_CLIENTS):
            c = subscribing_clients[i]
            for k in c._debug_counts:
                c._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
        introducer = IntroducerService()
        self.the_introducer = introducer
        newfurl = self.central_tub.registerReference(self.the_introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer)

    d.addCallback(_wait_for_connected)
    d.addCallback(_wait_for_expected_announcements)
    d.addCallback(_wait_until_idle)

    def _check3(res):
        log.msg("doing _check3")
        dc = self.the_introducer._debug_counts
        self.failUnlessEqual(dc["outbound_announcements"],
                             NUM_STORAGE * NUM_CLIENTS)
        self.failUnless(dc["outbound_message"] > 0)
        self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"] > 0)
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
            self.failUnlessEqual(cdc["new_announcement"], 0)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
    d.addCallback(_check3)
    return d
def do_system_test(self, create_introducer):
    self.create_tub()
    introducer = create_introducer()
    introducer.setServiceParent(self.parent)
    iff = os.path.join(self.basedir, "introducer.furl")
    tub = self.central_tub
    ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
    self.introducer_furl = ifurl

    NUMCLIENTS = 5
    # we have 5 clients who publish themselves, and an extra one which
    # does not. When the connections are fully established, all six nodes
    # should have 5 connections each.

    clients = []
    tubs = {}
    received_announcements = {}
    NUM_SERVERS = NUMCLIENTS
    subscribing_clients = []
    publishing_clients = []

    for i in range(NUMCLIENTS+1):
        tub = Tub()
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        l = tub.listenOn("tcp:0")
        portnum = l.getPortnum()
        tub.setLocation("localhost:%d" % portnum)

        log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
        c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i,
                             "version", "oldest")
        received_announcements[c] = {}
        def got(serverid, ann_d, announcements):
            announcements[serverid] = ann_d
        c.subscribe_to("storage", got, received_announcements[c])
        subscribing_clients.append(c)

        if i < NUMCLIENTS:
            node_furl = tub.registerReference(Referenceable())
            c.publish(node_furl, "storage", "ri_name")
            publishing_clients.append(c)
        # the last one does not publish anything

        c.setServiceParent(self.parent)
        clients.append(c)
        tubs[c] = tub

    def _wait_for_all_connections():
        for c in subscribing_clients:
            if len(received_announcements[c]) < NUM_SERVERS:
                return False
        return True
    d = self.poll(_wait_for_all_connections)

    def _check1(res):
        log.msg("doing _check1")
        dc = introducer._debug_counts
        self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_duplicate"], 0)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnless(dc["outbound_message"])

        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_message"])
            self.failUnlessEqual(cdc["inbound_announcement"], NUM_SERVERS)
            self.failUnlessEqual(cdc["wrong_service"], 0)
            self.failUnlessEqual(cdc["duplicate_announcement"], 0)
            self.failUnlessEqual(cdc["update"], 0)
            self.failUnlessEqual(cdc["new_announcement"], NUM_SERVERS)
            anns = received_announcements[c]
            self.failUnlessEqual(len(anns), NUM_SERVERS)

            nodeid0 = b32decode(tubs[clients[0]].tubID.upper())
            ann_d = anns[nodeid0]
            nick = ann_d["nickname"]
            self.failUnlessEqual(type(nick), unicode)
            self.failUnlessEqual(nick, u"nickname-0")
        for c in publishing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["outbound_message"], 1)
    d.addCallback(_check1)

    # force an introducer reconnect, by shutting down the Tub it's using
    # and starting a new Tub (with the old introducer). Everybody should
    # reconnect and republish, but the introducer should ignore the
    # republishes as duplicates. However, because the server doesn't know
    # what each client does and does not know, it will send them a copy
    # of the current announcement table anyway.

    d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

    def _wait_for_introducer_loss():
        for c in clients:
            if c.connected_to_introducer():
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

    def _restart_introducer_tub(_ign):
        log.msg("restarting introducer's Tub")

        dc = introducer._debug_counts
        self.expected_count = dc["inbound_message"] + NUM_SERVERS
        self.expected_subscribe_count = dc["inbound_subscribe"] + NUMCLIENTS+1
        introducer._debug0 = dc["outbound_message"]
        for c in subscribing_clients:
            cdc = c._debug_counts
            c._debug0 = cdc["inbound_message"]

        self.create_tub(self.central_portnum)
        newfurl = self.central_tub.registerReference(introducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer_tub)

    def _wait_for_introducer_reconnect():
        # wait until:
        #  all clients are connected
        #  the introducer has received publish messages from all of them
        #  the introducer has received subscribe messages from all of them
        #  the introducer has sent (duplicate) announcements to all of them
        #  all clients have received (duplicate) announcements
        dc = introducer._debug_counts
        for c in clients:
            if not c.connected_to_introducer():
                return False
        if dc["inbound_message"] < self.expected_count:
            return False
        if dc["inbound_subscribe"] < self.expected_subscribe_count:
            return False
        for c in subscribing_clients:
            cdc = c._debug_counts
            if cdc["inbound_message"] < c._debug0+1:
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect))

    def _check2(res):
        log.msg("doing _check2")
        # assert that the introducer sent out new messages, one per
        # subscriber
        dc = introducer._debug_counts
        self.failUnlessEqual(dc["inbound_message"], 2*NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS)
        self.failUnlessEqual(dc["inbound_update"], 0)
        self.failUnlessEqual(dc["outbound_message"],
                             introducer._debug0 + len(subscribing_clients))
        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnlessEqual(cdc["duplicate_announcement"], NUM_SERVERS)
    d.addCallback(_check2)

    # Then force an introducer restart, by shutting down the Tub,
    # destroying the old introducer, and starting a new Tub+Introducer.
    # Everybody should reconnect and republish, and the (new) introducer
    # will distribute the new announcements, but the clients should
    # ignore the republishes as duplicates.

    d.addCallback(lambda _ign: log.msg("shutting down introducer"))
    d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
    d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

    def _restart_introducer(_ign):
        log.msg("restarting introducer")
        self.create_tub(self.central_portnum)

        for c in subscribing_clients:
            # record some counters for later comparison. Stash the values
            # on the client itself, because I'm lazy.
            cdc = c._debug_counts
            c._debug1 = cdc["inbound_announcement"]
            c._debug2 = cdc["inbound_message"]
            c._debug3 = cdc["new_announcement"]
        newintroducer = create_introducer()
        self.expected_message_count = NUM_SERVERS
        self.expected_announcement_count = NUM_SERVERS*len(subscribing_clients)
        self.expected_subscribe_count = len(subscribing_clients)
        newfurl = self.central_tub.registerReference(newintroducer,
                                                     furlFile=iff)
        assert newfurl == self.introducer_furl
    d.addCallback(_restart_introducer)

    def _wait_for_introducer_reconnect2():
        # wait until:
        #  all clients are connected
        #  the introducer has received publish messages from all of them
        #  the introducer has received subscribe messages from all of them
        #  the introducer has sent announcements for everybody to everybody
        #  all clients have received all the (duplicate) announcements
        # at that point, the system should be quiescent
        dc = introducer._debug_counts
        for c in clients:
            if not c.connected_to_introducer():
                return False
        if dc["inbound_message"] < self.expected_message_count:
            return False
        if dc["outbound_announcements"] < self.expected_announcement_count:
            return False
        if dc["inbound_subscribe"] < self.expected_subscribe_count:
            return False
        for c in subscribing_clients:
            cdc = c._debug_counts
            if cdc["inbound_announcement"] < c._debug1+NUM_SERVERS:
                return False
        return True
    d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2))

    def _check3(res):
        log.msg("doing _check3")
        for c in clients:
            self.failUnless(c.connected_to_introducer())
        for c in subscribing_clients:
            cdc = c._debug_counts
            self.failUnless(cdc["inbound_announcement"] > c._debug1)
            self.failUnless(cdc["inbound_message"] > c._debug2)
            # there should have been no new announcements
            self.failUnlessEqual(cdc["new_announcement"], c._debug3)
            # and the right number of duplicate ones. There were
            # NUM_SERVERS from the servertub restart, and there should be
            # another NUM_SERVERS now
            self.failUnlessEqual(cdc["duplicate_announcement"],
                                 2*NUM_SERVERS)
    d.addCallback(_check3)

    return d
class Node(service.MultiService):
    # this implements common functionality of both Client nodes and Introducer
    # nodes.
    NODETYPE = "unknown NODETYPE"
    PORTNUMFILE = None
    CERTFILE = "node.pem"
    GENERATED_FILES = []

    def __init__(self, basedir=u"."):
        service.MultiService.__init__(self)
        self.basedir = abspath_expanduser_unicode(unicode(basedir))
        self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE)
        fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
        open(os.path.join(self.basedir, "private", "README"), "w").write(PRIV_README)

        # creates self.config
        self.read_config()
        nickname_utf8 = self.get_config("node", "nickname", "<unspecified>")
        self.nickname = nickname_utf8.decode("utf-8")
        assert type(self.nickname) is unicode

        self.init_tempdir()
        self.create_tub()
        self.logSource = "Node"

        self.setup_logging()
        self.log("Node constructed. " + get_package_versions_string())
        iputil.increase_rlimits()

    def init_tempdir(self):
        tempdir_config = self.get_config("node", "tempdir", "tmp").decode('utf-8')
        tempdir = abspath_expanduser_unicode(tempdir_config, base=self.basedir)
        if not os.path.exists(tempdir):
            fileutil.make_dirs(tempdir)
        tempfile.tempdir = tempdir
        # this should cause twisted.web.http (which uses
        # tempfile.TemporaryFile) to put large request bodies in the given
        # directory. Without this, the default temp dir is usually /tmp/,
        # which is frequently too small.
        test_name = tempfile.mktemp()
        _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)

    @staticmethod
    def _contains_unescaped_hash(item):
        characters = iter(item)
        for c in characters:
            if c == '\\':
                characters.next()
            elif c == '#':
                return True
        return False

    def get_config(self, section, option, default=_None, boolean=False):
        try:
            if boolean:
                return self.config.getboolean(section, option)

            item = self.config.get(section, option)
            if option.endswith(".furl") and self._contains_unescaped_hash(item):
                raise UnescapedHashError(section, option, item)

            return item
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            if default is _None:
                fn = os.path.join(self.basedir, u"tahoe.cfg")
                raise MissingConfigEntry("%s is missing the [%s]%s entry"
                                         % (quote_output(fn), section, option))
            return default

    def read_config(self):
        self.error_about_old_config_files()
        self.config = ConfigParser.SafeConfigParser()

        tahoe_cfg = os.path.join(self.basedir, "tahoe.cfg")
        try:
            self.config = configutil.get_config(tahoe_cfg)
        except EnvironmentError:
            if os.path.exists(tahoe_cfg):
                raise

    def error_about_old_config_files(self):
        """
        If any old configuration files are detected, raise OldConfigError.
        """
        oldfnames = set()
        for name in ['nickname', 'webport', 'keepalive_timeout',
                     'log_gatherer.furl', 'disconnect_timeout',
                     'advertised_ip_addresses', 'introducer.furl',
                     'helper.furl', 'key_generator.furl', 'stats_gatherer.furl',
                     'no_storage', 'readonly_storage', 'sizelimit',
                     'debug_discard_storage', 'run_helper']:
            if name not in self.GENERATED_FILES:
                fullfname = os.path.join(self.basedir, name)
                if os.path.exists(fullfname):
                    oldfnames.add(fullfname)
        if oldfnames:
            e = OldConfigError(oldfnames)
            twlog.msg(e)
            raise e

    def _convert_tub_port(self, s):
        if re.search(r'^\d+$', s):
            return "tcp:%d" % int(s)
        return s

    def get_tub_port(self):
        # return a descriptor string
        cfg_tubport = self.get_config("node", "tub.port", "")
        if cfg_tubport:
            return self._convert_tub_port(cfg_tubport)
        # For 'tub.port', tahoe.cfg overrides the individual file on disk. So
        # only read self._portnumfile if tahoe.cfg doesn't provide a value.
        if os.path.exists(self._portnumfile):
            file_tubport = fileutil.read(self._portnumfile).strip()
            return self._convert_tub_port(file_tubport)
        tubport = "tcp:%d" % iputil.allocate_tcp_port()
        fileutil.write_atomically(self._portnumfile, tubport + "\n", mode="")
        return tubport

    def get_tub_location(self, tubport):
        location = self.get_config("node", "tub.location", "AUTO")
        # Replace the location "AUTO", if present, with the detected local
        # addresses. Don't probe for local addresses unless necessary.
        split_location = location.split(",")
        if "AUTO" in split_location:
            local_addresses = iputil.get_local_addresses_sync()
            # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
            local_portnum = int(tubport.split(":")[1])
        new_locations = []
        for loc in split_location:
            if loc == "AUTO":
                new_locations.extend(["tcp:%s:%d" % (ip, local_portnum)
                                      for ip in local_addresses])
            else:
                new_locations.append(loc)
        return ",".join(new_locations)

    def create_tub(self):
        certfile = os.path.join(self.basedir, "private", self.CERTFILE)
        self.tub = Tub(certFile=certfile)
        self.tub.setOption("logLocalFailures", True)
        self.tub.setOption("logRemoteFailures", True)
        self.tub.setOption("expose-remote-exception-types", False)

        # see #521 for a discussion of how to pick these timeout values.
        keepalive_timeout_s = self.get_config("node", "timeout.keepalive", "")
        if keepalive_timeout_s:
            self.tub.setOption("keepaliveTimeout", int(keepalive_timeout_s))
        disconnect_timeout_s = self.get_config("node", "timeout.disconnect", "")
        if disconnect_timeout_s:
            # N.B.: this is in seconds, so use "1800" to get 30min
            self.tub.setOption("disconnectTimeout", int(disconnect_timeout_s))

        self.nodeid = b32decode(self.tub.tubID.upper())  # binary format
        self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n")
        self.short_nodeid = b32encode(self.nodeid).lower()[:8]  # ready for printing

        tubport = self.get_tub_port()
        if tubport in ("0", "tcp:0"):
            raise ValueError("tub.port cannot be 0: you must choose")
        self.tub.listenOn(tubport)

        location = self.get_tub_location(tubport)
        self.tub.setLocation(location)
        self.log("Tub location set to %s" % (location,))

        # the Tub is now ready for tub.registerReference()
        self.tub.setServiceParent(self)

    def get_app_versions(self):
        # TODO: merge this with allmydata.get_package_versions
        return dict(app_versions.versions)

    def get_config_from_file(self, name, required=False):
        """Get the (string) contents of a config file, or None if the file
        did not exist. If required=True, raise an exception rather than
        returning None. Any leading or trailing whitespace will be stripped
        from the data."""
        fn = os.path.join(self.basedir, name)
        try:
            return fileutil.read(fn).strip()
        except EnvironmentError:
            if not required:
                return None
            raise

    def write_private_config(self, name, value):
        """Write the (string) contents of a private config file (which is a
        config file that resides within the subdirectory named 'private'), and
        return it.
        """
        privname = os.path.join(self.basedir, "private", name)
        open(privname, "w").write(value)

    def get_private_config(self, name, default=_None):
        """Read the (string) contents of a private config file (which is a
        config file that resides within the subdirectory named 'private'),
        and return it. Return a default, or raise an error if one was not
        given.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            return fileutil.read(privname).strip()
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            return default

    def get_or_create_private_config(self, name, default=_None):
        """Try to get the (string) contents of a private config file (which
        is a config file that resides within the subdirectory named
        'private'), and return it. Any leading or trailing whitespace will be
        stripped from the data.

        If the file does not exist, and default is not given, report an
        error. If the file does not exist and a default is specified, try to
        create it using that default, and then return the value that was
        written. If 'default' is a string, use it as a default value. If not,
        treat it as a zero-argument callable that is expected to return a
        string.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            value = fileutil.read(privname)
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            if isinstance(default, basestring):
                value = default
            else:
                value = default()
            fileutil.write(privname, value)
        return value.strip()

    def write_config(self, name, value, mode="w"):
        """Write a string to a config file."""
        fn = os.path.join(self.basedir, name)
        try:
            fileutil.write(fn, value, mode)
        except EnvironmentError, e:
            self.log("Unable to write config file '%s'" % fn)
            self.log(e)
class Node(service.MultiService):
    # this implements common functionality of both Client nodes and Introducer
    # nodes.
    NODETYPE = "unknown NODETYPE"
    PORTNUMFILE = None
    CERTFILE = "node.pem"

    def __init__(self, basedir=u"."):
        service.MultiService.__init__(self)
        self.basedir = abspath_expanduser_unicode(unicode(basedir))
        self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE)
        self._tub_ready_observerlist = observer.OneShotObserverList()
        fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
        open(os.path.join(self.basedir, "private", "README"), "w").write(PRIV_README)

        # creates self.config, populates from distinct files if necessary
        self.read_config()

        nickname_utf8 = self.get_config("node", "nickname", "<unspecified>")
        self.nickname = nickname_utf8.decode("utf-8")
        assert type(self.nickname) is unicode

        self.init_tempdir()
        self.create_tub()
        self.logSource = "Node"

        self.setup_ssh()
        self.setup_logging()
        self.log("Node constructed. " + get_package_versions_string())
        iputil.increase_rlimits()

    def init_tempdir(self):
        local_tempdir_utf8 = "tmp"  # default is NODEDIR/tmp/
        tempdir = self.get_config("node", "tempdir", local_tempdir_utf8).decode('utf-8')
        tempdir = os.path.join(self.basedir, tempdir)
        if not os.path.exists(tempdir):
            fileutil.make_dirs(tempdir)
        tempfile.tempdir = abspath_expanduser_unicode(tempdir)
        # this should cause twisted.web.http (which uses
        # tempfile.TemporaryFile) to put large request bodies in the given
        # directory. Without this, the default temp dir is usually /tmp/,
        # which is frequently too small.
        test_name = tempfile.mktemp()
        _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)

    def get_config(self, section, option, default=_None, boolean=False):
        try:
            if boolean:
                return self.config.getboolean(section, option)
            return self.config.get(section, option)
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            if default is _None:
                fn = os.path.join(self.basedir, "tahoe.cfg")
                raise MissingConfigEntry("%s is missing the [%s]%s entry"
                                         % (fn, section, option))
            return default

    def set_config(self, section, option, value):
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config.set(section, option, value)
        assert self.config.get(section, option) == value

    def read_config(self):
        self.config = ConfigParser.SafeConfigParser()
        self.config.read([os.path.join(self.basedir, "tahoe.cfg")])
        self.read_old_config_files()

    def read_old_config_files(self):
        # backwards-compatibility: individual files will override the
        # contents of tahoe.cfg
        copy = self._copy_config_from_file

        copy("nickname", "node", "nickname")
        copy("webport", "node", "web.port")

        cfg_tubport = self.get_config("node", "tub.port", "")
        if not cfg_tubport:
            # For 'tub.port', tahoe.cfg overrides the individual file on
            # disk. So only read self._portnumfile if tahoe.cfg doesn't
            # provide a value.
            try:
                file_tubport = open(self._portnumfile, "rU").read().strip()
                self.set_config("node", "tub.port", file_tubport)
            except EnvironmentError:
                pass

        copy("keepalive_timeout", "node", "timeout.keepalive")
        copy("disconnect_timeout", "node", "timeout.disconnect")

    def _copy_config_from_file(self, config_filename, section, keyname):
        s = self.get_config_from_file(config_filename)
        if s is not None:
            self.set_config(section, keyname, s)

    def create_tub(self):
        certfile = os.path.join(self.basedir, "private", self.CERTFILE)
        self.tub = Tub(certFile=certfile)
        self.tub.setOption("logLocalFailures", True)
        self.tub.setOption("logRemoteFailures", True)
        self.tub.setOption("expose-remote-exception-types", False)

        # see #521 for a discussion of how to pick these timeout values.
        keepalive_timeout_s = self.get_config("node", "timeout.keepalive", "")
        if keepalive_timeout_s:
            self.tub.setOption("keepaliveTimeout", int(keepalive_timeout_s))
        disconnect_timeout_s = self.get_config("node", "timeout.disconnect", "")
        if disconnect_timeout_s:
            # N.B.: this is in seconds, so use "1800" to get 30min
            self.tub.setOption("disconnectTimeout", int(disconnect_timeout_s))

        self.nodeid = b32decode(self.tub.tubID.upper())  # binary format
        self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n")
        self.short_nodeid = b32encode(self.nodeid).lower()[:8]  # ready for printing

        tubport = self.get_config("node", "tub.port", "tcp:0")
        self.tub.listenOn(tubport)
        # we must wait until our service has started before we can find out
        # our IP address and thus do tub.setLocation, and we can't register
        # any services with the Tub until after that point
        self.tub.setServiceParent(self)

    def setup_ssh(self):
        ssh_port = self.get_config("node", "ssh.port", "")
        if ssh_port:
            ssh_keyfile = self.get_config("node", "ssh.authorized_keys_file").decode('utf-8')
            from allmydata import manhole
            m = manhole.AuthorizedKeysManhole(ssh_port,
                                              ssh_keyfile.encode(get_filesystem_encoding()))
            m.setServiceParent(self)
            self.log("AuthorizedKeysManhole listening on %s" % ssh_port)

    def get_app_versions(self):
        # TODO: merge this with allmydata.get_package_versions
        return dict(app_versions.versions)

    def get_config_from_file(self, name, required=False):
        """Get the (string) contents of a config file, or None if the file
        did not exist. If required=True, raise an exception rather than
        returning None. Any leading or trailing whitespace will be stripped
        from the data."""
        fn = os.path.join(self.basedir, name)
        try:
            return open(fn, "r").read().strip()
        except EnvironmentError:
            if not required:
                return None
            raise

    def write_private_config(self, name, value):
        """Write the (string) contents of a private config file (which is a
        config file that resides within the subdirectory named 'private'), and
        return it. Any leading or trailing whitespace will be stripped from
        the data.
        """
        privname = os.path.join(self.basedir, "private", name)
        open(privname, "w").write(value.strip())

    def get_or_create_private_config(self, name, default):
        """Try to get the (string) contents of a private config file (which
        is a config file that resides within the subdirectory named
        'private'), and return it. Any leading or trailing whitespace will be
        stripped from the data.

        If the file does not exist, try to create it using default, and then
        return the value that was written. If 'default' is a string, use it
        as a default value. If not, treat it as a 0-argument callable which
        is expected to return a string.
        """
        privname = os.path.join("private", name)
        value = self.get_config_from_file(privname)
        if value is None:
            if isinstance(default, (str, unicode)):
                value = default
            else:
                value = default()
            fn = os.path.join(self.basedir, privname)
            try:
                open(fn, "w").write(value)
            except EnvironmentError, e:
                self.log("Unable to write config file '%s'" % fn)
                self.log(e)
        value = value.strip()
        return value
class Node(service.MultiService):
    """
    This class implements common functionality of both Client nodes and
    Introducer nodes.
    """
    NODETYPE = "unknown NODETYPE"
    CERTFILE = "node.pem"
    GENERATED_FILES = []

    def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider):
        """
        Initialize the node with the given configuration. Its base directory
        is the current directory by default.
        """
        service.MultiService.__init__(self)

        self.config = config
        self.get_config = config.get_config  # XXX stopgap
        self.nickname = config.nickname  # XXX stopgap

        # this can go away once Client.init_client_storage_broker is moved
        # into create_client() (tests sometimes have None here)
        self._i2p_provider = i2p_provider
        self._tor_provider = tor_provider

        self.init_tempdir()

        self.create_log_tub()
        self.logSource = "Node"
        self.setup_logging()

        self.tub = main_tub
        if self.tub is not None:
            self.nodeid = b32decode(self.tub.tubID.upper())  # binary format
            self.short_nodeid = b32encode(self.nodeid).lower()[:8]  # for printing
            self.config.write_config_file("my_nodeid",
                                          b32encode(self.nodeid).lower() + "\n")
            self.tub.setServiceParent(self)
        else:
            self.nodeid = self.short_nodeid = None

        self.control_tub = control_tub
        if self.control_tub is not None:
            self.control_tub.setServiceParent(self)

        self.log("Node constructed. " + get_package_versions_string())
        iputil.increase_rlimits()

    def _is_tub_listening(self):
        """
        :returns: True if the main tub is listening
        """
        return len(self.tub.getListeners()) > 0

    def init_tempdir(self):
        """
        Initialize/create a directory for temporary files.
        """
        tempdir_config = self.config.get_config("node", "tempdir", "tmp").decode('utf-8')
        tempdir = self.config.get_config_path(tempdir_config)
        if not os.path.exists(tempdir):
            fileutil.make_dirs(tempdir)
        tempfile.tempdir = tempdir
        # this should cause twisted.web.http (which uses
        # tempfile.TemporaryFile) to put large request bodies in the given
        # directory. Without this, the default temp dir is usually /tmp/,
        # which is frequently too small.
        temp_fd, test_name = tempfile.mkstemp()
        _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)
        os.close(temp_fd)  # avoid leak of unneeded fd

    # pull this outside of Node's __init__ too, see:
    # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2948
    def create_log_tub(self):
        # The logport uses a localhost-only ephemeral Tub, with no control
        # over the listening port or location. This might change if we
        # discover a compelling reason for it in the future (e.g. being able
        # to use "flogtool tail" against a remote server), but for now I
        # think we can live without it.
        self.log_tub = Tub()
        portnum = iputil.listenOnUnused(self.log_tub)
        self.log("Log Tub location set to 127.0.0.1:%s" % (portnum,))
        self.log_tub.setServiceParent(self)

    def startService(self):
        # Note: this class can be started and stopped at most once.
        self.log("Node.startService")
        # Record the process id in the twisted log, after startService()
        # (__init__ is called before fork(), but startService is called
        # after). Note that Foolscap logs handle pid-logging by itself, no
        # need to send a pid to the foolscap log here.
        twlog.msg("My pid: %s" % os.getpid())
        try:
            os.chmod("twistd.pid", 0o644)
        except EnvironmentError:
            pass

        service.MultiService.startService(self)
        self.log("%s running" % self.NODETYPE)
        twlog.msg("%s running" % self.NODETYPE)

    def stopService(self):
        self.log("Node.stopService")
        return service.MultiService.stopService(self)

    def shutdown(self):
        """Shut down the node. Returns a Deferred that fires (with None) when
        it finally stops kicking."""
        self.log("Node.shutdown")
        return self.stopService()

    def setup_logging(self):
        # we replace the formatTime() method of the log observer that
        # twistd set up for us, with a method that uses our preferred
        # timestamp format.
        for o in twlog.theLogPublisher.observers:
            # o might be a FileLogObserver's .emit method
            if type(o) is type(self.setup_logging):  # bound method
                ob = o.im_self
                if isinstance(ob, twlog.FileLogObserver):
                    newmeth = types.UnboundMethodType(formatTimeTahoeStyle, ob,
                                                      ob.__class__)
                    ob.formatTime = newmeth
        # TODO: twisted >2.5.0 offers maxRotatedFiles=50

        lgfurl_file = self.config.get_private_path("logport.furl").encode(get_filesystem_encoding())
        if os.path.exists(lgfurl_file):
            os.remove(lgfurl_file)
        self.log_tub.setOption("logport-furlfile", lgfurl_file)
        lgfurl = self.config.get_config("node", "log_gatherer.furl", "")
        if lgfurl:
            # this is in addition to the contents of log-gatherer-furlfile
            self.log_tub.setOption("log-gatherer-furl", lgfurl)
        self.log_tub.setOption("log-gatherer-furlfile",
                               self.config.get_config_path("log_gatherer.furl"))

        incident_dir = self.config.get_config_path("logs", "incidents")
        foolscap.logging.log.setLogDir(incident_dir.encode(get_filesystem_encoding()))
        twlog.msg("Foolscap logging initialized")
        twlog.msg("Note to developers: twistd.log does not receive very much.")
        twlog.msg("Use 'flogtool tail -c NODEDIR/private/logport.furl' instead")
        twlog.msg("and read docs/logging.rst")

    def log(self, *args, **kwargs):
        return log.msg(*args, **kwargs)
class SystemFramework(pollmixin.PollMixin): numnodes = 7 def __init__(self, basedir, mode): self.basedir = basedir = abspath_expanduser_unicode(str(basedir)) if not (basedir + os.path.sep ).startswith(abspath_expanduser_unicode(u".") + os.path.sep): raise AssertionError("safety issue: basedir must be a subdir") self.testdir = testdir = os.path.join(basedir, "test") if os.path.exists(testdir): shutil.rmtree(testdir) fileutil.make_dirs(testdir) self.sparent = service.MultiService() self.sparent.startService() self.proc = None self.tub = Tub() self.tub.setOption("expose-remote-exception-types", False) self.tub.setServiceParent(self.sparent) self.mode = mode self.failed = False self.keepalive_file = None def run(self): framelog = os.path.join(self.basedir, "driver.log") log.startLogging(open(framelog, "a"), setStdout=False) log.msg("CHECK_MEMORY(mode=%s) STARTING" % self.mode) #logfile = open(os.path.join(self.testdir, "log"), "w") #flo = log.FileLogObserver(logfile) #log.startLoggingWithObserver(flo.emit, setStdout=False) d = fireEventually() d.addCallback(lambda res: self.setUp()) d.addCallback(lambda res: self.record_initial_memusage()) d.addCallback(lambda res: self.make_nodes()) d.addCallback(lambda res: self.wait_for_client_connected()) d.addCallback(lambda res: self.do_test()) d.addBoth(self.tearDown) def _err(err): self.failed = err log.err(err) print(err) d.addErrback(_err) def _done(res): reactor.stop() return res d.addBoth(_done) reactor.run() if self.failed: # raiseException doesn't work for CopiedFailures self.failed.raiseException() def setUp(self): #print("STARTING") self.stats = {} self.statsfile = open(os.path.join(self.basedir, "stats.out"), "a") self.make_introducer() d = self.start_client() def _record_control_furl(control_furl): self.control_furl = control_furl #print("OBTAINING '%s'" % (control_furl,)) return self.tub.getReference(self.control_furl) d.addCallback(_record_control_furl) def _record_control(control_rref): self.control_rref = control_rref d.addCallback(_record_control) def _ready(res): #print("CLIENT READY") pass d.addCallback(_ready) return d def record_initial_memusage(self): print() print("Client started (no connections yet)") d = self._print_usage() d.addCallback(self.stash_stats, "init") return d def wait_for_client_connected(self): print() print("Client connecting to other nodes..") return self.control_rref.callRemote("wait_for_client_connections", self.numnodes + 1) def tearDown(self, passthrough): # the client node will shut down in a few seconds #os.remove(os.path.join(self.clientdir, client.Client.EXIT_TRIGGER_FILE)) log.msg("shutting down SystemTest services") if self.keepalive_file and os.path.exists(self.keepalive_file): age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME] log.msg("keepalive file at shutdown was %ds old" % age) d = defer.succeed(None) if self.proc: d.addCallback(lambda res: self.kill_client()) d.addCallback(lambda res: self.sparent.stopService()) d.addCallback(lambda res: flushEventualQueue()) def _close_statsfile(res): self.statsfile.close() d.addCallback(_close_statsfile) d.addCallback(lambda res: passthrough) return d def make_introducer(self): iv_basedir = os.path.join(self.testdir, "introducer") os.mkdir(iv_basedir) self.introducer = introducer.IntroducerNode(basedir=iv_basedir) self.introducer.setServiceParent(self) self.introducer_furl = self.introducer.introducer_url def make_nodes(self): self.nodes = [] for i in range(self.numnodes): nodedir = os.path.join(self.testdir, "node%d" % i) os.mkdir(nodedir) f = 
open(os.path.join(nodedir, "tahoe.cfg"), "w") f.write("[client]\n" "introducer.furl = %s\n" "shares.happy = 1\n" "[storage]\n" % (self.introducer_furl, )) # the only tests for which we want the internal nodes to actually # retain shares are the ones where somebody's going to download # them. if self.mode in ("download", "download-GET", "download-GET-slow"): # retain shares pass else: # for these tests, we tell the storage servers to pretend to # accept shares, but really just throw them out, since we're # only testing upload and not download. f.write("debug_discard = true\n") if self.mode in ("receive", ): # for this mode, the client-under-test gets all the shares, # so our internal nodes can refuse requests f.write("readonly = true\n") f.close() c = client.Client(basedir=nodedir) c.setServiceParent(self.sparent) self.nodes.append(c) # the peers will start running; eventually they will connect to each # other and the introducer def touch_keepalive(self): if os.path.exists(self.keepalive_file): age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME] log.msg("touching keepalive file, was %ds old" % age) f = open(self.keepalive_file, "w") f.write("""\ If the node notices this file at startup, it will poll every 5 seconds and terminate if the file is more than 10 seconds old, or if it has been deleted. If the test harness has an internal failure and neglects to kill off the node itself, this helps to avoid leaving processes lying around. The contents of this file are ignored. """) f.close() def start_client(self): # this returns a Deferred that fires with the client's control.furl log.msg("MAKING CLIENT") # self.testdir is an absolute Unicode path clientdir = self.clientdir = os.path.join(self.testdir, u"client") clientdir_str = clientdir.encode(get_filesystem_encoding()) quiet = StringIO() create_node.create_node({'basedir': clientdir}, out=quiet) log.msg("DONE MAKING CLIENT") # now replace tahoe.cfg # set webport=0 and then ask the node what port it picked. f = open(os.path.join(clientdir, "tahoe.cfg"), "w") f.write("[node]\n" "web.port = tcp:0:interface=127.0.0.1\n" "[client]\n" "introducer.furl = %s\n" "shares.happy = 1\n" "[storage]\n" % (self.introducer_furl, )) if self.mode in ("upload-self", "receive"): # accept and store shares, to trigger the memory consumption bugs pass else: # don't accept any shares f.write("readonly = true\n") ## also, if we do receive any shares, throw them away #f.write("debug_discard = true") if self.mode == "upload-self": pass f.close() self.keepalive_file = os.path.join(clientdir, client.Client.EXIT_TRIGGER_FILE) # now start updating the mtime. self.touch_keepalive() ts = internet.TimerService(1.0, self.touch_keepalive) ts.setServiceParent(self.sparent) pp = ClientWatcher() self.proc_done = pp.d = defer.Deferred() logfile = os.path.join(self.basedir, "client.log") tahoes = procutils.which("tahoe") if not tahoes: raise RuntimeError("unable to find a 'tahoe' executable") cmd = [tahoes[0], "run", ".", "-l", logfile] env = os.environ.copy() self.proc = reactor.spawnProcess(pp, cmd[0], cmd, env, path=clientdir_str) log.msg("CLIENT STARTED") # now we wait for the client to get started. we're looking for the # control.furl file to appear. furl_file = os.path.join(clientdir, "private", "control.furl") url_file = os.path.join(clientdir, "node.url") def _check(): if pp.ended and pp.ended.value.status != 0: # the twistd process ends normally (with rc=0) if the child # is successfully launched. It ends abnormally (with rc!=0) # if the child cannot be launched. 
raise ChildDidNotStartError( "process ended while waiting for startup") return os.path.exists(furl_file) d = self.poll(_check, 0.1) # once it exists, wait a moment before we read from it, just in case # it hasn't finished writing the whole thing. Ideally control.furl # would be created in some atomic fashion, or made non-readable until # it's ready, but I can't think of an easy way to do that, and I # think the chances that we'll observe a half-write are pretty low. def _stall(res): d2 = defer.Deferred() reactor.callLater(0.1, d2.callback, None) return d2 d.addCallback(_stall) def _read(res): # read the node's URL self.webish_url = open(url_file, "r").read().strip() if self.webish_url[-1] == "/": # trim trailing slash, since the rest of the code wants it gone self.webish_url = self.webish_url[:-1] f = open(furl_file, "r") furl = f.read() return furl.strip() d.addCallback(_read) return d def kill_client(self): # returns a Deferred that fires when the process exits. This may only # be called once. try: self.proc.signalProcess("INT") except error.ProcessExitedAlready: pass return self.proc_done def create_data(self, name, size): filename = os.path.join(self.testdir, name + ".data") f = open(filename, "wb") block = "a" * 8192 while size > 0: l = min(size, 8192) f.write(block[:l]) size -= l f.close() return filename def stash_stats(self, stats, name): self.statsfile.write("%s %s: %d\n" % (self.mode, name, stats['VmPeak'])) self.statsfile.flush() self.stats[name] = stats['VmPeak'] def POST(self, urlpath, **fields): url = self.webish_url + urlpath sepbase = "boogabooga" sep = "--" + sepbase form = [] form.append(sep) form.append('Content-Disposition: form-data; name="_charset"') form.append('') form.append('UTF-8') form.append(sep) for name, value in fields.iteritems(): if isinstance(value, tuple): filename, value = value form.append('Content-Disposition: form-data; name="%s"; ' 'filename="%s"' % (name, filename)) else: form.append('Content-Disposition: form-data; name="%s"' % name) form.append('') form.append(value) form.append(sep) form[-1] += "--" body = "\r\n".join(form) + "\r\n" headers = { "content-type": "multipart/form-data; boundary=%s" % sepbase, } return tw_client.getPage(url, method="POST", postdata=body, headers=headers, followRedirect=False) def GET_discard(self, urlpath, stall): url = self.webish_url + urlpath + "?filename=dummy-get.out" return discardPage(url, stall) def _print_usage(self, res=None): d = self.control_rref.callRemote("get_memory_usage") def _print(stats): print("VmSize: %9d VmPeak: %9d" % (stats["VmSize"], stats["VmPeak"])) return stats d.addCallback(_print) return d def _do_upload(self, res, size, files, uris): name = '%d' % size print() print("uploading %s" % name) if self.mode in ("upload", "upload-self"): d = self.control_rref.callRemote("upload_random_data_from_file", size, convergence="check-memory") elif self.mode == "upload-POST": data = "a" * size url = "/uri" d = self.POST(url, t="upload", file=("%d.data" % size, data)) elif self.mode in ("receive", "download", "download-GET", "download-GET-slow"): # mode=receive: upload the data from a local peer, so that the # client-under-test receives and stores the shares # # mode=download*: upload the data from a local peer, then have # the client-under-test download it. # # we need to wait until the uploading node has connected to all # peers, since the wait_for_client_connections() above doesn't # pay attention to our self.nodes[] and their connections. 
files[name] = self.create_data(name, size) u = self.nodes[0].getServiceNamed("uploader") d = self.nodes[0].debug_wait_for_client_connections(self.numnodes + 1) d.addCallback(lambda res: u.upload( upload.FileName(files[name], convergence="check-memory"))) d.addCallback(lambda results: results.get_uri()) else: raise ValueError("unknown mode=%s" % self.mode) def _complete(uri): uris[name] = uri print("uploaded %s" % name) d.addCallback(_complete) return d def _do_download(self, res, size, uris): if self.mode not in ("download", "download-GET", "download-GET-slow"): return name = '%d' % size print("downloading %s" % name) uri = uris[name] if self.mode == "download": d = self.control_rref.callRemote("download_to_tempfile_and_delete", uri) elif self.mode == "download-GET": url = "/uri/%s" % uri d = self.GET_discard(urllib.quote(url), stall=False) elif self.mode == "download-GET-slow": url = "/uri/%s" % uri d = self.GET_discard(urllib.quote(url), stall=True) def _complete(res): print("downloaded %s" % name) return res d.addCallback(_complete) return d def do_test(self): #print("CLIENT STARTED") #print("FURL", self.control_furl) #print("RREF", self.control_rref) #print() kB = 1000 MB = 1000 * 1000 files = {} uris = {} d = self._print_usage() d.addCallback(self.stash_stats, "0B") for i in range(10): d.addCallback(self._do_upload, 10 * kB + i, files, uris) d.addCallback(self._do_download, 10 * kB + i, uris) d.addCallback(self._print_usage) d.addCallback(self.stash_stats, "10kB") for i in range(3): d.addCallback(self._do_upload, 10 * MB + i, files, uris) d.addCallback(self._do_download, 10 * MB + i, uris) d.addCallback(self._print_usage) d.addCallback(self.stash_stats, "10MB") for i in range(1): d.addCallback(self._do_upload, 50 * MB + i, files, uris) d.addCallback(self._do_download, 50 * MB + i, uris) d.addCallback(self._print_usage) d.addCallback(self.stash_stats, "50MB") #for i in range(1): # d.addCallback(self._do_upload, 100*MB+i, files, uris) # d.addCallback(self._do_download, 100*MB+i, uris) # d.addCallback(self._print_usage) #d.addCallback(self.stash_stats, "100MB") #d.addCallback(self.stall) def _done(res): print("FINISHING") d.addCallback(_done) return d def stall(self, res): d = defer.Deferred() reactor.callLater(5, d.callback, None) return d
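# The POST() helper above hand-rolls its multipart/form-data body. The same
# encoding pulled out as a standalone sketch, with field handling simplified
# to plain string values; the boundary string is arbitrary as long as it does
# not appear in the payload:

def encode_form(fields, sepbase="boogabooga"):
    sep = "--" + sepbase
    form = []
    for name, value in fields.items():
        form.append(sep)
        form.append('Content-Disposition: form-data; name="%s"' % name)
        form.append('')
        form.append(value)
    form.append(sep + "--")
    body = "\r\n".join(form) + "\r\n"
    headers = {"content-type": "multipart/form-data; boundary=%s" % sepbase}
    return body, headers

body, headers = encode_form({"t": "upload"})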
class SpeedTest: DO_IMMUTABLE = True DO_MUTABLE_CREATE = True DO_MUTABLE = True def __init__(self, test_client_dir): #self.real_stderr = sys.stderr log.startLogging(open("st.log", "a"), setStdout=False) f = open(os.path.join(test_client_dir, "private", "control.furl"), "r") self.control_furl = f.read().strip() f.close() self.base_service = service.MultiService() self.failed = None self.upload_times = {} self.download_times = {} def run(self): print("STARTING") d = fireEventually() d.addCallback(lambda res: self.setUp()) d.addCallback(lambda res: self.do_test()) d.addBoth(self.tearDown) def _err(err): self.failed = err log.err(err) print(err) d.addErrback(_err) def _done(res): reactor.stop() return res d.addBoth(_done) reactor.run() if self.failed: print("EXCEPTION") print(self.failed) sys.exit(1) def setUp(self): self.base_service.startService() self.tub = Tub() self.tub.setOption("expose-remote-exception-types", False) self.tub.setServiceParent(self.base_service) d = self.tub.getReference(self.control_furl) def _gotref(rref): self.client_rref = rref print("Got Client Control reference") return self.stall(5) d.addCallback(_gotref) return d def stall(self, delay, result=None): d = defer.Deferred() reactor.callLater(delay, d.callback, result) return d def record_times(self, times, key): print("TIME (%s): %s up, %s down" % (key, times[0], times[1])) self.upload_times[key], self.download_times[key] = times def one_test(self, res, name, count, size, mutable): # values for 'mutable': # False (upload a different CHK file for each 'count') # "create" (upload different contents into a new SSK file) # "upload" (upload different contents into the same SSK file. The # time consumed does not include the creation of the file) d = self.client_rref.callRemote("speed_test", count, size, mutable) d.addCallback(self.record_times, name) return d def measure_rtt(self, res): # use RIClient.get_nodeid() to measure the foolscap-level RTT d = self.client_rref.callRemote("measure_peer_response_time") def _got(res): assert len(res) # need at least one peer times = res.values() self.total_rtt = sum(times) self.average_rtt = sum(times) / len(times) self.max_rtt = max(times) print("num-peers: %d" % len(times)) print("total-RTT: %f" % self.total_rtt) print("average-RTT: %f" % self.average_rtt) print("max-RTT: %f" % self.max_rtt) d.addCallback(_got) return d def do_test(self): print("doing test") d = defer.succeed(None) d.addCallback(self.one_test, "startup", 1, 1000, False) #ignore this one d.addCallback(self.measure_rtt) if self.DO_IMMUTABLE: # immutable files d.addCallback(self.one_test, "1x 200B", 1, 200, False) d.addCallback(self.one_test, "10x 200B", 10, 200, False) def _maybe_do_100x_200B(res): if self.upload_times["10x 200B"] < 5: print("10x 200B test went too fast, doing 100x 200B test") return self.one_test(None, "100x 200B", 100, 200, False) return d.addCallback(_maybe_do_100x_200B) d.addCallback(self.one_test, "1MB", 1, 1*MB, False) d.addCallback(self.one_test, "10MB", 1, 10*MB, False) def _maybe_do_100MB(res): if self.upload_times["10MB"] > 30: print("10MB test took too long, skipping 100MB test") return return self.one_test(None, "100MB", 1, 100*MB, False) d.addCallback(_maybe_do_100MB) if self.DO_MUTABLE_CREATE: # mutable file creation d.addCallback(self.one_test, "10x 200B SSK creation", 10, 200, "create") if self.DO_MUTABLE: # mutable file upload/download d.addCallback(self.one_test, "10x 200B SSK", 10, 200, "upload") def _maybe_do_100x_200B_SSK(res): if self.upload_times["10x 200B SSK"] < 5: print("10x 
200B SSK test went too fast, doing 100x 200B SSK") return self.one_test(None, "100x 200B SSK", 100, 200, "upload") return d.addCallback(_maybe_do_100x_200B_SSK) d.addCallback(self.one_test, "1MB SSK", 1, 1*MB, "upload") d.addCallback(self.calculate_speeds) return d def calculate_speeds(self, res): # time = A*size+B # we assume that A*200bytes is negligible if self.DO_IMMUTABLE: # upload if "100x 200B" in self.upload_times: B = self.upload_times["100x 200B"] / 100 else: B = self.upload_times["10x 200B"] / 10 print("upload per-file time: %.3fs" % B) print("upload per-file times-avg-RTT: %f" % (B / self.average_rtt)) print("upload per-file times-total-RTT: %f" % (B / self.total_rtt)) A1 = 1*MB / (self.upload_times["1MB"] - B) # in bytes per second print("upload speed (1MB):", self.number(A1, "Bps")) A2 = 10*MB / (self.upload_times["10MB"] - B) print("upload speed (10MB):", self.number(A2, "Bps")) if "100MB" in self.upload_times: A3 = 100*MB / (self.upload_times["100MB"] - B) print("upload speed (100MB):", self.number(A3, "Bps")) # download if "100x 200B" in self.download_times: B = self.download_times["100x 200B"] / 100 else: B = self.download_times["10x 200B"] / 10 print("download per-file time: %.3fs" % B) print("download per-file times-avg-RTT: %f" % (B / self.average_rtt)) print("download per-file times-total-RTT: %f" % (B / self.total_rtt)) A1 = 1*MB / (self.download_times["1MB"] - B) # in bytes per second print("download speed (1MB):", self.number(A1, "Bps")) A2 = 10*MB / (self.download_times["10MB"] - B) print("download speed (10MB):", self.number(A2, "Bps")) if "100MB" in self.download_times: A3 = 100*MB / (self.download_times["100MB"] - B) print("download speed (100MB):", self.number(A3, "Bps")) if self.DO_MUTABLE_CREATE: # SSK creation B = self.upload_times["10x 200B SSK creation"] / 10 print("create per-file time SSK: %.3fs" % B) if self.DO_MUTABLE: # upload SSK if "100x 200B SSK" in self.upload_times: B = self.upload_times["100x 200B SSK"] / 100 else: B = self.upload_times["10x 200B SSK"] / 10 print("upload per-file time SSK: %.3fs" % B) A1 = 1*MB / (self.upload_times["1MB SSK"] - B) # in bytes per second print("upload speed SSK (1MB):", self.number(A1, "Bps")) # download SSK if "100x 200B SSK" in self.download_times: B = self.download_times["100x 200B SSK"] / 100 else: B = self.download_times["10x 200B SSK"] / 10 print("download per-file time SSK: %.3fs" % B) A1 = 1*MB / (self.download_times["1MB SSK"] - B) # in bytes per # second print("download speed SSK (1MB):", self.number(A1, "Bps")) def number(self, value, suffix=""): scaling = 1 if value < 1: fmt = "%1.2g%s" elif value < 100: fmt = "%.1f%s" elif value < 1000: fmt = "%d%s" elif value < 1e6: fmt = "%.2fk%s"; scaling = 1e3 elif value < 1e9: fmt = "%.2fM%s"; scaling = 1e6 elif value < 1e12: fmt = "%.2fG%s"; scaling = 1e9 elif value < 1e15: fmt = "%.2fT%s"; scaling = 1e12 elif value < 1e18: fmt = "%.2fP%s"; scaling = 1e15 else: fmt = "huge! %g%s" return fmt % (value / scaling, suffix) def tearDown(self, res): d = self.base_service.stopService() d.addCallback(lambda ignored: res) return d
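# Presumably this harness is driven from the command line with the basedir of
# an already-running client, whose private/control.furl grants control
# access. A sketch of such an entry point (the real one is not shown here):

import sys

if __name__ == "__main__":
    st = SpeedTest(sys.argv[1])  # e.g. the client's basedir
    st.run()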
class ReferenceCounting(ShouldFailMixin, unittest.TestCase): def setUp(self): self.s = service.MultiService() self.s.startService() self.target_tub = Tub() self.target_tub.setServiceParent(self.s) l = self.target_tub.listenOn("tcp:0:interface=127.0.0.1") self.target_tub.setLocation("127.0.0.1:%d" % l.getPortnum()) self.source_tub = Tub() self.source_tub.setServiceParent(self.s) def tearDown(self): return self.s.stopService() def setupTarget(self, target): furl = self.target_tub.registerReference(target) d = self.source_tub.getReference(furl) return d def test_reference_counting(self): self.source_tub.setOption("expose-remote-exception-types", True) target = HelperTarget() d = self.setupTarget(target) def _stash(rref): # to exercise bug #104, we need to trigger remote Violations, so # we tell the sending side to not use a RemoteInterface. We do # this by reaching inside the RemoteReference and making it # forget rref.tracker.interfaceName = None rref.tracker.interface = None self.rref = rref d.addCallback(_stash) # the first call causes an error, which discards all remaining # tokens, including the OPEN tokens for the arguments. The #104 bug # is that this causes the open-count to get out of sync, by -2 (one # for the arguments sequence, one for the list inside it). d.addCallback( lambda ign: self.shouldFail(Violation, "one", None, self.rref. callRemote, "bogus", ["one list"])) #d.addCallback(lambda ign: # self.rref.callRemote("set", ["one list"])) # a method call that has no arguments (specifically no REFERENCE # sequences) won't notice the loss of sync d.addCallback(lambda ign: self.rref.callRemote("set", 42)) def _check_42(ign): self.failUnlessEqual(target.obj, 42) d.addCallback(_check_42) # but when the call takes shared arguments, sync matters l = ["list", 1, 2] s = set([3, 4]) t = ("tuple", 5, 6) d.addCallback(lambda ign: self.rref.callRemote("set", [t, l, s, t])) def _check_shared(ign): # the off-by-two bug would cause the second tuple shared-ref to # point at the set instead of the first tuple self.failUnlessEqual(type(target.obj), list) one, two, three, four = target.obj self.failUnlessEqual(type(one), tuple) self.failUnlessEqual(one, t) self.failUnlessEqual(type(two), list) self.failUnlessEqual(two, l) self.failUnlessEqual(type(three), set) self.failUnlessEqual(three, s) self.failUnlessEqual(type(four), tuple) # this is where it fails self.failUnlessEqual(four, t) self.failUnlessIdentical(one, four) d.addCallback(_check_shared) return d
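# To make the sharing structure in _check_shared concrete: the argument list
# really does contain the same tuple object twice, and the test asserts that
# this identity survives the round trip. A foolscap-free illustration of the
# payload being sent:

l = ["list", 1, 2]
s = set([3, 4])
t = ("tuple", 5, 6)
payload = [t, l, s, t]
# one shared tuple, referenced twice; the off-by-two bug made the second
# reference resolve to the wrong container on the receiving side
assert payload[0] is payload[3]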
class Node(service.MultiService): # this implements common functionality of both Client nodes and Introducer # nodes. NODETYPE = "unknown NODETYPE" PORTNUMFILE = None CERTFILE = "node.pem" def __init__(self, basedir=u"."): service.MultiService.__init__(self) self.basedir = abspath_expanduser_unicode(unicode(basedir)) self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE) self._tub_ready_observerlist = observer.OneShotObserverList() fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700) open(os.path.join(self.basedir, "private", "README"), "w").write(PRIV_README) # creates self.config, populates from distinct files if necessary self.read_config() nickname_utf8 = self.get_config("node", "nickname", "<unspecified>") self.nickname = nickname_utf8.decode("utf-8") assert type(self.nickname) is unicode self.init_tempdir() self.create_tub() self.logSource = "Node" self.setup_ssh() self.setup_logging() self.log("Node constructed. " + get_package_versions_string()) iputil.increase_rlimits() def init_tempdir(self): local_tempdir_utf8 = "tmp" # default is NODEDIR/tmp/ tempdir = self.get_config("node", "tempdir", local_tempdir_utf8).decode('utf-8') tempdir = os.path.join(self.basedir, tempdir) if not os.path.exists(tempdir): fileutil.make_dirs(tempdir) tempfile.tempdir = abspath_expanduser_unicode(tempdir) # this should cause twisted.web.http (which uses # tempfile.TemporaryFile) to put large request bodies in the given # directory. Without this, the default temp dir is usually /tmp/, # which is frequently too small. test_name = tempfile.mktemp() _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir) def get_config(self, section, option, default=_None, boolean=False): try: if boolean: return self.config.getboolean(section, option) return self.config.get(section, option) except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): if default is _None: fn = os.path.join(self.basedir, "tahoe.cfg") raise MissingConfigEntry("%s is missing the [%s]%s entry" % (fn, section, option)) return default def set_config(self, section, option, value): if not self.config.has_section(section): self.config.add_section(section) self.config.set(section, option, value) assert self.config.get(section, option) == value def read_config(self): self.config = ConfigParser.SafeConfigParser() self.config.read([os.path.join(self.basedir, "tahoe.cfg")]) self.read_old_config_files() def read_old_config_files(self): # backwards-compatibility: individual files will override the # contents of tahoe.cfg copy = self._copy_config_from_file copy("nickname", "node", "nickname") copy("webport", "node", "web.port") cfg_tubport = self.get_config("node", "tub.port", "") if not cfg_tubport: # For 'tub.port', tahoe.cfg overrides the individual file on # disk. So only read self._portnumfile if tahoe.cfg doesn't # provide a value. 
try: file_tubport = open(self._portnumfile, "rU").read().strip() self.set_config("node", "tub.port", file_tubport) except EnvironmentError: pass copy("keepalive_timeout", "node", "timeout.keepalive") copy("disconnect_timeout", "node", "timeout.disconnect") def _copy_config_from_file(self, config_filename, section, keyname): s = self.get_config_from_file(config_filename) if s is not None: self.set_config(section, keyname, s) def create_tub(self): certfile = os.path.join(self.basedir, "private", self.CERTFILE) self.tub = Tub(certFile=certfile) self.tub.setOption("logLocalFailures", True) self.tub.setOption("logRemoteFailures", True) self.tub.setOption("expose-remote-exception-types", False) # see #521 for a discussion of how to pick these timeout values. keepalive_timeout_s = self.get_config("node", "timeout.keepalive", "") if keepalive_timeout_s: self.tub.setOption("keepaliveTimeout", int(keepalive_timeout_s)) disconnect_timeout_s = self.get_config("node", "timeout.disconnect", "") if disconnect_timeout_s: # N.B.: this is in seconds, so use "1800" to get 30min self.tub.setOption("disconnectTimeout", int(disconnect_timeout_s)) self.nodeid = b32decode(self.tub.tubID.upper()) # binary format self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n") self.short_nodeid = b32encode( self.nodeid).lower()[:8] # ready for printing tubport = self.get_config("node", "tub.port", "tcp:0") self.tub.listenOn(tubport) # we must wait until our service has started before we can find out # our IP address and thus do tub.setLocation, and we can't register # any services with the Tub until after that point self.tub.setServiceParent(self) def setup_ssh(self): ssh_port = self.get_config("node", "ssh.port", "") if ssh_port: ssh_keyfile = self.get_config( "node", "ssh.authorized_keys_file").decode('utf-8') from allmydata import manhole m = manhole.AuthorizedKeysManhole( ssh_port, ssh_keyfile.encode(get_filesystem_encoding())) m.setServiceParent(self) self.log("AuthorizedKeysManhole listening on %s" % ssh_port) def get_app_versions(self): # TODO: merge this with allmydata.get_package_versions return dict(app_versions.versions) def get_config_from_file(self, name, required=False): """Get the (string) contents of a config file, or None if the file did not exist. If required=True, raise an exception rather than returning None. Any leading or trailing whitespace will be stripped from the data.""" fn = os.path.join(self.basedir, name) try: return open(fn, "r").read().strip() except EnvironmentError: if not required: return None raise def write_private_config(self, name, value): """Write the (string) contents of a private config file (which is a config file that resides within the subdirectory named 'private'), and return it. Any leading or trailing whitespace will be stripped from the data. """ privname = os.path.join(self.basedir, "private", name) open(privname, "w").write(value.strip()) def get_or_create_private_config(self, name, default): """Try to get the (string) contents of a private config file (which is a config file that resides within the subdirectory named 'private'), and return it. Any leading or trailing whitespace will be stripped from the data. If the file does not exist, try to create it using default, and then return the value that was written. If 'default' is a string, use it as a default value. If not, treat it as a 0-argument callable which is expected to return a string. 
""" privname = os.path.join("private", name) value = self.get_config_from_file(privname) if value is None: if isinstance(default, (str, unicode)): value = default else: value = default() fn = os.path.join(self.basedir, privname) try: open(fn, "w").write(value) except EnvironmentError, e: self.log("Unable to write config file '%s'" % fn) self.log(e) value = value.strip() return value
class NativeStorageServer(service.MultiService): """I hold information about a storage server that we want to connect to. If we are connected, I hold the RemoteReference, their host address, and their version information. I remember information about when we were last connected too, even if we aren't currently connected. @ivar last_connect_time: when we last established a connection @ivar last_loss_time: when we last lost a connection @ivar version: the server's versiondict, from the most recent announcement @ivar nickname: the server's self-reported nickname (unicode), same for each connection @ivar rref: the RemoteReference, if connected, otherwise None @ivar remote_host: the IAddress, if connected, otherwise None """ implements(IServer) VERSION_DEFAULTS = { "http://allmydata.org/tahoe/protocols/storage/v1" : { "maximum-immutable-share-size": 2**32 - 1, "maximum-mutable-share-size": 2*1000*1000*1000, # maximum prior to v1.9.2 "tolerates-immutable-read-overrun": False, "delete-mutable-shares-with-zero-length-writev": False, "available-space": None, }, "application-version": "unknown: no get_version()", } def __init__(self, key_s, ann, tub_options={}): service.MultiService.__init__(self) self.key_s = key_s self.announcement = ann self._tub_options = tub_options assert "anonymous-storage-FURL" in ann, ann furl = str(ann["anonymous-storage-FURL"]) m = re.match(r'pb://(\w+)@', furl) assert m, furl tubid_s = m.group(1).lower() self._tubid = base32.a2b(tubid_s) assert "permutation-seed-base32" in ann, ann ps = base32.a2b(str(ann["permutation-seed-base32"])) self._permutation_seed = ps if key_s: self._long_description = key_s if key_s.startswith("v0-"): # remove v0- prefix from abbreviated name self._short_description = key_s[3:3+8] else: self._short_description = key_s[:8] else: self._long_description = tubid_s self._short_description = tubid_s[:6] self.last_connect_time = None self.last_loss_time = None self.remote_host = None self.rref = None self._is_connected = False self._reconnector = None self._trigger_cb = None self._on_status_changed = ObserverList() def on_status_changed(self, status_changed): """ :param status_changed: a callable taking a single arg (the NativeStorageServer) that is notified when we become connected """ return self._on_status_changed.subscribe(status_changed) # Special methods used by copy.copy() and copy.deepcopy(). When those are # used in allmydata.immutable.filenode to copy CheckResults during # repair, we want it to treat the IServer instances as singletons, and # not attempt to duplicate them. def __copy__(self): return self def __deepcopy__(self, memodict): return self def __repr__(self): return "<NativeStorageServer for %s>" % self.get_name() def get_serverid(self): return self._tubid # XXX replace with self.key_s def get_permutation_seed(self): return self._permutation_seed def get_version(self): if self.rref: return self.rref.version return None def get_name(self): # keep methodname short # TODO: decide who adds [] in the short description. It should # probably be the output side, not here. 
return self._short_description def get_longname(self): return self._long_description def get_lease_seed(self): return self._tubid def get_foolscap_write_enabler_seed(self): return self._tubid def get_nickname(self): return self.announcement["nickname"] def get_announcement(self): return self.announcement def get_remote_host(self): return self.remote_host def is_connected(self): return self._is_connected def get_last_connect_time(self): return self.last_connect_time def get_last_loss_time(self): return self.last_loss_time def get_last_received_data_time(self): if self.rref is None: return None else: return self.rref.getDataLastReceivedAt() def get_available_space(self): version = self.get_version() if version is None: return None protocol_v1_version = version.get('http://allmydata.org/tahoe/protocols/storage/v1', {}) available_space = protocol_v1_version.get('available-space') if available_space is None: available_space = protocol_v1_version.get('maximum-immutable-share-size', None) return available_space def start_connecting(self, trigger_cb): self._tub = Tub() for (name, value) in self._tub_options.items(): self._tub.setOption(name, value) self._tub.setServiceParent(self) furl = str(self.announcement["anonymous-storage-FURL"]) self._trigger_cb = trigger_cb self._reconnector = self._tub.connectTo(furl, self._got_connection) def _got_connection(self, rref): lp = log.msg(format="got connection to %(name)s, getting versions", name=self.get_name(), facility="tahoe.storage_broker", umid="coUECQ") if self._trigger_cb: eventually(self._trigger_cb) default = self.VERSION_DEFAULTS d = add_version_to_remote_reference(rref, default) d.addCallback(self._got_versioned_service, lp) d.addCallback(lambda ign: self._on_status_changed.notify(self)) d.addErrback(log.err, format="storageclient._got_connection", name=self.get_name(), umid="Sdq3pg") def _got_versioned_service(self, rref, lp): log.msg(format="%(name)s provided version info %(version)s", name=self.get_name(), version=rref.version, facility="tahoe.storage_broker", umid="SWmJYg", level=log.NOISY, parent=lp) self.last_connect_time = time.time() self.remote_host = rref.getPeer() self.rref = rref self._is_connected = True rref.notifyOnDisconnect(self._lost) def get_rref(self): return self.rref def _lost(self): log.msg(format="lost connection to %(name)s", name=self.get_name(), facility="tahoe.storage_broker", umid="zbRllw") self.last_loss_time = time.time() # self.rref is now stale: all callRemote()s will get a # DeadReferenceError. We leave the stale reference in place so that # uploader/downloader code (which received this IServer through # get_connected_servers() or get_servers_for_psi()) can continue to # use s.get_rref().callRemote() and not worry about it being None. self._is_connected = False self.remote_host = None def stop_connecting(self): # used when this descriptor has been superseded by another self._reconnector.stopConnecting() def try_to_connect(self): # used when the broker wants us to hurry up self._reconnector.reset()
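# The tubid extraction in NativeStorageServer.__init__ is a single regex over
# the FURL. The same parsing as a standalone sketch, using a made-up
# (hypothetical) FURL:

import re

furl = "pb://ysbz4st7sqdsm2vnqzqsb77ogvx77pey@tcp:example.net:1234/swissnum"
m = re.match(r'pb://(\w+)@', furl)
assert m, furl
tubid_s = m.group(1).lower()
print(tubid_s[:6])  # the short description used for unsigned servers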
def do_system_test(self, create_introducer): self.create_tub() introducer = create_introducer() introducer.setServiceParent(self.parent) iff = os.path.join(self.basedir, "introducer.furl") tub = self.central_tub ifurl = self.central_tub.registerReference(introducer, furlFile=iff) self.introducer_furl = ifurl NUMCLIENTS = 5 # we have 5 clients who publish themselves, and an extra one which # does not. When the connections are fully established, all six nodes # should have 5 connections each. clients = [] tubs = {} received_announcements = {} NUM_SERVERS = NUMCLIENTS subscribing_clients = [] publishing_clients = [] for i in range(NUMCLIENTS + 1): tub = Tub() #tub.setOption("logLocalFailures", True) #tub.setOption("logRemoteFailures", True) tub.setOption("expose-remote-exception-types", False) tub.setServiceParent(self.parent) l = tub.listenOn("tcp:0") portnum = l.getPortnum() tub.setLocation("localhost:%d" % portnum) log.msg("creating client %d: %s" % (i, tub.getShortTubID())) c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i, "version", "oldest") received_announcements[c] = {} def got(serverid, ann_d, announcements): announcements[serverid] = ann_d c.subscribe_to("storage", got, received_announcements[c]) subscribing_clients.append(c) if i < NUMCLIENTS: node_furl = tub.registerReference(Referenceable()) c.publish(node_furl, "storage", "ri_name") publishing_clients.append(c) # the last one does not publish anything c.setServiceParent(self.parent) clients.append(c) tubs[c] = tub def _wait_for_all_connections(): for c in subscribing_clients: if len(received_announcements[c]) < NUM_SERVERS: return False return True d = self.poll(_wait_for_all_connections) def _check1(res): log.msg("doing _check1") dc = introducer._debug_counts self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS) self.failUnlessEqual(dc["inbound_duplicate"], 0) self.failUnlessEqual(dc["inbound_update"], 0) self.failUnless(dc["outbound_message"]) for c in clients: self.failUnless(c.connected_to_introducer()) for c in subscribing_clients: cdc = c._debug_counts self.failUnless(cdc["inbound_message"]) self.failUnlessEqual(cdc["inbound_announcement"], NUM_SERVERS) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], 0) self.failUnlessEqual(cdc["update"], 0) self.failUnlessEqual(cdc["new_announcement"], NUM_SERVERS) anns = received_announcements[c] self.failUnlessEqual(len(anns), NUM_SERVERS) nodeid0 = b32decode(tubs[clients[0]].tubID.upper()) ann_d = anns[nodeid0] nick = ann_d["nickname"] self.failUnlessEqual(type(nick), unicode) self.failUnlessEqual(nick, u"nickname-0") for c in publishing_clients: cdc = c._debug_counts self.failUnlessEqual(cdc["outbound_message"], 1) d.addCallback(_check1) # force an introducer reconnect, by shutting down the Tub it's using # and starting a new Tub (with the old introducer). Everybody should # reconnect and republish, but the introducer should ignore the # republishes as duplicates. However, because the server doesn't know # what each client does and does not know, it will send them a copy # of the current announcement table anyway. 
d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) def _wait_for_introducer_loss(): for c in clients: if c.connected_to_introducer(): return False return True d.addCallback(lambda res: self.poll(_wait_for_introducer_loss)) def _restart_introducer_tub(_ign): log.msg("restarting introducer's Tub") dc = introducer._debug_counts self.expected_count = dc["inbound_message"] + NUM_SERVERS self.expected_subscribe_count = dc[ "inbound_subscribe"] + NUMCLIENTS + 1 introducer._debug0 = dc["outbound_message"] for c in subscribing_clients: cdc = c._debug_counts c._debug0 = cdc["inbound_message"] self.create_tub(self.central_portnum) newfurl = self.central_tub.registerReference(introducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer_tub) def _wait_for_introducer_reconnect(): # wait until: # all clients are connected # the introducer has received publish messages from all of them # the introducer has received subscribe messages from all of them # the introducer has sent (duplicate) announcements to all of them # all clients have received (duplicate) announcements dc = introducer._debug_counts for c in clients: if not c.connected_to_introducer(): return False if dc["inbound_message"] < self.expected_count: return False if dc["inbound_subscribe"] < self.expected_subscribe_count: return False for c in subscribing_clients: cdc = c._debug_counts if cdc["inbound_message"] < c._debug0 + 1: return False return True d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect)) def _check2(res): log.msg("doing _check2") # assert that the introducer sent out new messages, one per # subscriber dc = introducer._debug_counts self.failUnlessEqual(dc["inbound_message"], 2 * NUM_SERVERS) self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS) self.failUnlessEqual(dc["inbound_update"], 0) self.failUnlessEqual(dc["outbound_message"], introducer._debug0 + len(subscribing_clients)) for c in clients: self.failUnless(c.connected_to_introducer()) for c in subscribing_clients: cdc = c._debug_counts self.failUnlessEqual(cdc["duplicate_announcement"], NUM_SERVERS) d.addCallback(_check2) # Then force an introducer restart, by shutting down the Tub, # destroying the old introducer, and starting a new Tub+Introducer. # Everybody should reconnect and republish, and the (new) introducer # will distribute the new announcements, but the clients should # ignore the republishes as duplicates. d.addCallback(lambda _ign: log.msg("shutting down introducer")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) d.addCallback(lambda res: self.poll(_wait_for_introducer_loss)) def _restart_introducer(_ign): log.msg("restarting introducer") self.create_tub(self.central_portnum) for c in subscribing_clients: # record some counters for later comparison. Stash the values # on the client itself, because I'm lazy. 
cdc = c._debug_counts c._debug1 = cdc["inbound_announcement"] c._debug2 = cdc["inbound_message"] c._debug3 = cdc["new_announcement"] newintroducer = create_introducer() self.expected_message_count = NUM_SERVERS self.expected_announcement_count = NUM_SERVERS * len( subscribing_clients) self.expected_subscribe_count = len(subscribing_clients) newfurl = self.central_tub.registerReference(newintroducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer) def _wait_for_introducer_reconnect2(): # wait until: # all clients are connected # the introducer has received publish messages from all of them # the introducer has received subscribe messages from all of them # the introducer has sent announcements for everybody to everybody # all clients have received all the (duplicate) announcements # at that point, the system should be quiescent dc = introducer._debug_counts for c in clients: if not c.connected_to_introducer(): return False if dc["inbound_message"] < self.expected_message_count: return False if dc["outbound_announcements"] < self.expected_announcement_count: return False if dc["inbound_subscribe"] < self.expected_subscribe_count: return False for c in subscribing_clients: cdc = c._debug_counts if cdc["inbound_announcement"] < c._debug1 + NUM_SERVERS: return False return True d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2)) def _check3(res): log.msg("doing _check3") for c in clients: self.failUnless(c.connected_to_introducer()) for c in subscribing_clients: cdc = c._debug_counts self.failUnless(cdc["inbound_announcement"] > c._debug1) self.failUnless(cdc["inbound_message"] > c._debug2) # there should have been no new announcements self.failUnlessEqual(cdc["new_announcement"], c._debug3) # and the right number of duplicate ones. There were # NUM_SERVERS from the servertub restart, and there should be # another NUM_SERVERS now self.failUnlessEqual(cdc["duplicate_announcement"], 2 * NUM_SERVERS) d.addCallback(_check3) return d
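# These tests lean heavily on PollMixin's poll(), which keeps re-running a
# zero-argument check until it returns a true value. A simplified synchronous
# analogue (the real one returns a Deferred and reschedules on the reactor):

import time

def poll_until(check, pollinterval=0.01, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return
        time.sleep(pollinterval)
    raise RuntimeError("poll_until timed out")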
def do_system_test(self): self.create_tub() introducer = IntroducerService() introducer.setServiceParent(self.parent) iff = os.path.join(self.basedir, "introducer.furl") tub = self.central_tub ifurl = self.central_tub.registerReference(introducer, furlFile=iff) self.introducer_furl = ifurl # we have 5 clients who publish themselves as storage servers, and a # sixth which does not. All 6 clients subscribe to hear about # storage. When the connections are fully established, all six nodes # should have 5 connections each. NUM_STORAGE = 5 NUM_CLIENTS = 6 clients = [] tubs = {} received_announcements = {} subscribing_clients = [] publishing_clients = [] printable_serverids = {} self.the_introducer = introducer privkeys = {} pubkeys = {} expected_announcements = [0 for c in range(NUM_CLIENTS)] for i in range(NUM_CLIENTS): tub = Tub() #tub.setOption("logLocalFailures", True) #tub.setOption("logRemoteFailures", True) tub.setOption("expose-remote-exception-types", False) tub.setServiceParent(self.parent) portnum = iputil.allocate_tcp_port() tub.listenOn("tcp:%d" % portnum) tub.setLocation("localhost:%d" % portnum) log.msg("creating client %d: %s" % (i, tub.getShortTubID())) c = IntroducerClient(tub, self.introducer_furl, NICKNAME % str(i), "version", "oldest", {"component": "component-v1"}, fakeseq, FilePath(self.mktemp())) received_announcements[c] = {} def got(key_s_or_tubid, ann, announcements): index = key_s_or_tubid or get_tubid_string_from_ann(ann) announcements[index] = ann c.subscribe_to("storage", got, received_announcements[c]) subscribing_clients.append(c) expected_announcements[i] += 1 # all expect a 'storage' announcement node_furl = tub.registerReference(Referenceable()) privkey_s, pubkey_s = keyutil.make_keypair() privkey, _ignored = keyutil.parse_privkey(privkey_s) privkeys[i] = privkey pubkeys[i] = pubkey_s if i < NUM_STORAGE: # sign all announcements c.publish("storage", make_ann(node_furl), privkey) assert pubkey_s.startswith("pub-") printable_serverids[i] = pubkey_s[len("pub-"):] publishing_clients.append(c) else: # the last one does not publish anything pass if i == 2: # also publish something that nobody cares about boring_furl = tub.registerReference(Referenceable()) c.publish("boring", make_ann(boring_furl), privkey) c.setServiceParent(self.parent) clients.append(c) tubs[c] = tub def _wait_for_connected(ign): def _connected(): for c in clients: if not c.connected_to_introducer(): return False return True return self.poll(_connected) # we watch the clients to determine when the system has settled down. # Then we can look inside the server to assert things about its # state. def _wait_for_expected_announcements(ign): def _got_expected_announcements(): for i,c in enumerate(subscribing_clients): if len(received_announcements[c]) < expected_announcements[i]: return False return True return self.poll(_got_expected_announcements) # before shutting down any Tub, we'd like to know that there are no # messages outstanding def _wait_until_idle(ign): def _idle(): for c in subscribing_clients + publishing_clients: if c._debug_outstanding: return False if self.the_introducer._debug_outstanding: return False return True return self.poll(_idle) d = defer.succeed(None) d.addCallback(_wait_for_connected) d.addCallback(_wait_for_expected_announcements) d.addCallback(_wait_until_idle) def _check1(res): log.msg("doing _check1") dc = self.the_introducer._debug_counts # each storage server publishes a record. 
There is also one # "boring" self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+1) self.failUnlessEqual(dc["inbound_duplicate"], 0) self.failUnlessEqual(dc["inbound_update"], 0) self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) # the number of outbound messages is tricky.. I think it depends # upon a race between the publish and the subscribe messages. self.failUnless(dc["outbound_message"] > 0) # each client subscribes to "storage", and each server publishes self.failUnlessEqual(dc["outbound_announcements"], NUM_STORAGE*NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts self.failUnless(cdc["inbound_message"]) self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], 0) self.failUnlessEqual(cdc["update"], 0) self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE) anns = received_announcements[c] self.failUnlessEqual(len(anns), NUM_STORAGE) serverid0 = printable_serverids[0] ann = anns[serverid0] nick = ann["nickname"] self.failUnlessEqual(type(nick), unicode) self.failUnlessEqual(nick, NICKNAME % "0") for c in publishing_clients: cdc = c._debug_counts expected = 1 if c in [clients[2], # boring ]: expected = 2 self.failUnlessEqual(cdc["outbound_message"], expected) # now check the web status, make sure it renders without error ir = introweb.IntroducerRoot(self.parent) self.parent.nodeid = "NODEID" text = ir.renderSynchronously().decode("utf-8") self.failUnlessIn(NICKNAME % "0", text) # a v2 client self.failUnlessIn(NICKNAME % "1", text) # another v2 client for i in range(NUM_STORAGE): self.failUnlessIn(printable_serverids[i], text, (i,printable_serverids[i],text)) # make sure there isn't a double-base32ed string too self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text, (i,printable_serverids[i],text)) log.msg("_check1 done") d.addCallback(_check1) # force an introducer reconnect, by shutting down the Tub it's using # and starting a new Tub (with the old introducer). Everybody should # reconnect and republish, but the introducer should ignore the # republishes as duplicates. However, because the server doesn't know # what each client does and does not know, it will send them a copy # of the current announcement table anyway. 
d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) def _wait_for_introducer_loss(ign): def _introducer_lost(): for c in clients: if c.connected_to_introducer(): return False return True return self.poll(_introducer_lost) d.addCallback(_wait_for_introducer_loss) def _restart_introducer_tub(_ign): log.msg("restarting introducer's Tub") # reset counters for i in range(NUM_CLIENTS): c = subscribing_clients[i] for k in c._debug_counts: c._debug_counts[k] = 0 for k in self.the_introducer._debug_counts: self.the_introducer._debug_counts[k] = 0 expected_announcements[i] += 1 # new 'storage' for everyone self.create_tub(self.central_portnum) newfurl = self.central_tub.registerReference(self.the_introducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer_tub) d.addCallback(_wait_for_connected) d.addCallback(_wait_for_expected_announcements) d.addCallback(_wait_until_idle) d.addCallback(lambda _ign: log.msg(" reconnected")) # TODO: publish something while the introducer is offline, then # confirm it gets delivered when the connection is reestablished def _check2(res): log.msg("doing _check2") # assert that the introducer sent out new messages, one per # subscriber dc = self.the_introducer._debug_counts self.failUnlessEqual(dc["outbound_announcements"], NUM_STORAGE*NUM_CLIENTS) self.failUnless(dc["outbound_message"] > 0) self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts self.failUnlessEqual(cdc["inbound_message"], 1) self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) self.failUnlessEqual(cdc["new_announcement"], 0) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE) d.addCallback(_check2) # Then force an introducer restart, by shutting down the Tub, # destroying the old introducer, and starting a new Tub+Introducer. # Everybody should reconnect and republish, and the (new) introducer # will distribute the new announcements, but the clients should # ignore the republishes as duplicates. 
d.addCallback(lambda _ign: log.msg("shutting down introducer")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) d.addCallback(_wait_for_introducer_loss) d.addCallback(lambda _ign: log.msg("introducer lost")) def _restart_introducer(_ign): log.msg("restarting introducer") self.create_tub(self.central_portnum) # reset counters for i in range(NUM_CLIENTS): c = subscribing_clients[i] for k in c._debug_counts: c._debug_counts[k] = 0 expected_announcements[i] += 1 # new 'storage' for everyone introducer = IntroducerService() self.the_introducer = introducer newfurl = self.central_tub.registerReference(self.the_introducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer) d.addCallback(_wait_for_connected) d.addCallback(_wait_for_expected_announcements) d.addCallback(_wait_until_idle) def _check3(res): log.msg("doing _check3") dc = self.the_introducer._debug_counts self.failUnlessEqual(dc["outbound_announcements"], NUM_STORAGE*NUM_CLIENTS) self.failUnless(dc["outbound_message"] > 0) self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts self.failUnless(cdc["inbound_message"] > 0) self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) self.failUnlessEqual(cdc["new_announcement"], 0) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE) d.addCallback(_check3) return d
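# The printable serverids this test indexes announcements by are just the
# signing pubkey with its "pub-" prefix stripped, mirroring the calls the
# test itself makes (assuming keyutil is allmydata.util.keyutil):

from allmydata.util import keyutil

privkey_s, pubkey_s = keyutil.make_keypair()
privkey, _ignored = keyutil.parse_privkey(privkey_s)
assert pubkey_s.startswith("pub-")
printable_serverid = pubkey_s[len("pub-"):]  # the key used in the anns dicts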
class Node(service.MultiService):
    # this implements common functionality of both Client nodes and Introducer
    # nodes.
    NODETYPE = "unknown NODETYPE"
    PORTNUMFILE = None
    CERTFILE = "node.pem"
    GENERATED_FILES = []

    def __init__(self, basedir=u"."):
        service.MultiService.__init__(self)
        self.basedir = abspath_expanduser_unicode(unicode(basedir))
        self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE)
        fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
        open(os.path.join(self.basedir, "private", "README"), "w").write(PRIV_README)

        # creates self.config
        self.read_config()
        nickname_utf8 = self.get_config("node", "nickname", "<unspecified>")
        self.nickname = nickname_utf8.decode("utf-8")
        assert type(self.nickname) is unicode

        self.init_tempdir()
        self.create_tub()
        self.logSource = "Node"

        self.setup_logging()
        self.log("Node constructed. " + get_package_versions_string())
        iputil.increase_rlimits()

    def init_tempdir(self):
        tempdir_config = self.get_config("node", "tempdir", "tmp").decode('utf-8')
        tempdir = abspath_expanduser_unicode(tempdir_config, base=self.basedir)
        if not os.path.exists(tempdir):
            fileutil.make_dirs(tempdir)
        tempfile.tempdir = tempdir
        # this should cause twisted.web.http (which uses
        # tempfile.TemporaryFile) to put large request bodies in the given
        # directory. Without this, the default temp dir is usually /tmp/,
        # which is frequently too small.
        test_name = tempfile.mktemp()
        _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)

    @staticmethod
    def _contains_unescaped_hash(item):
        characters = iter(item)
        for c in characters:
            if c == '\\':
                characters.next()
            elif c == '#':
                return True
        return False

    def get_config(self, section, option, default=_None, boolean=False):
        try:
            if boolean:
                return self.config.getboolean(section, option)
            item = self.config.get(section, option)
            if option.endswith(".furl") and self._contains_unescaped_hash(item):
                raise UnescapedHashError(section, option, item)
            return item
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            if default is _None:
                fn = os.path.join(self.basedir, u"tahoe.cfg")
                raise MissingConfigEntry("%s is missing the [%s]%s entry"
                                         % (quote_output(fn), section, option))
            return default

    def read_config(self):
        self.error_about_old_config_files()
        self.config = ConfigParser.SafeConfigParser()
        tahoe_cfg = os.path.join(self.basedir, "tahoe.cfg")
        try:
            self.config = configutil.get_config(tahoe_cfg)
        except EnvironmentError:
            if os.path.exists(tahoe_cfg):
                raise

    def error_about_old_config_files(self):
        """
        If any old configuration files are detected, raise OldConfigError.
        """
        oldfnames = set()
        for name in ['nickname', 'webport', 'keepalive_timeout',
                     'log_gatherer.furl', 'disconnect_timeout',
                     'advertised_ip_addresses', 'introducer.furl',
                     'helper.furl', 'key_generator.furl',
                     'stats_gatherer.furl', 'no_storage',
                     'readonly_storage', 'sizelimit',
                     'debug_discard_storage', 'run_helper']:
            if name not in self.GENERATED_FILES:
                fullfname = os.path.join(self.basedir, name)
                if os.path.exists(fullfname):
                    oldfnames.add(fullfname)
        if oldfnames:
            e = OldConfigError(oldfnames)
            twlog.msg(e)
            raise e

    def _convert_tub_port(self, s):
        if re.search(r'^\d+$', s):
            return "tcp:%d" % int(s)
        return s

    def get_tub_port(self):
        # return a descriptor string
        cfg_tubport = self.get_config("node", "tub.port", "")
        if cfg_tubport:
            return self._convert_tub_port(cfg_tubport)
        # For 'tub.port', tahoe.cfg overrides the individual file on disk. So
        # only read self._portnumfile if tahoe.cfg doesn't provide a value.
        if os.path.exists(self._portnumfile):
            file_tubport = fileutil.read(self._portnumfile).strip()
            return self._convert_tub_port(file_tubport)
        tubport = "tcp:%d" % iputil.allocate_tcp_port()
        fileutil.write_atomically(self._portnumfile, tubport + "\n", mode="")
        return tubport

    def get_tub_location(self, tubport):
        location = self.get_config("node", "tub.location", "AUTO")
        # Replace the location "AUTO", if present, with the detected local
        # addresses. Don't probe for local addresses unless necessary.
        split_location = location.split(",")
        if "AUTO" in split_location:
            local_addresses = iputil.get_local_addresses_sync()
            # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
            local_portnum = int(tubport.split(":")[1])
        new_locations = []
        for loc in split_location:
            if loc == "AUTO":
                new_locations.extend(["tcp:%s:%d" % (ip, local_portnum)
                                      for ip in local_addresses])
            else:
                new_locations.append(loc)
        return ",".join(new_locations)

    def create_tub(self):
        certfile = os.path.join(self.basedir, "private", self.CERTFILE)
        self.tub = Tub(certFile=certfile)
        self.tub_options = {
            "logLocalFailures": True,
            "logRemoteFailures": True,
            "expose-remote-exception-types": False,
        }

        # see #521 for a discussion of how to pick these timeout values.
        keepalive_timeout_s = self.get_config("node", "timeout.keepalive", "")
        if keepalive_timeout_s:
            self.tub_options["keepaliveTimeout"] = int(keepalive_timeout_s)
        disconnect_timeout_s = self.get_config("node", "timeout.disconnect", "")
        if disconnect_timeout_s:
            # N.B.: this is in seconds, so use "1800" to get 30min
            self.tub_options["disconnectTimeout"] = int(disconnect_timeout_s)

        for (name, value) in self.tub_options.items():
            self.tub.setOption(name, value)

        self.nodeid = b32decode(self.tub.tubID.upper()) # binary format
        self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n")
        self.short_nodeid = b32encode(self.nodeid).lower()[:8] # ready for printing

        tubport = self.get_tub_port()
        if tubport in ("0", "tcp:0"):
            raise ValueError("tub.port cannot be 0: you must choose")
        self.tub.listenOn(tubport)

        location = self.get_tub_location(tubport)
        self.tub.setLocation(location)
        self.log("Tub location set to %s" % (location,))

        # the Tub is now ready for tub.registerReference()
        self.tub.setServiceParent(self)

    def get_app_versions(self):
        # TODO: merge this with allmydata.get_package_versions
        return dict(app_versions.versions)

    def get_config_from_file(self, name, required=False):
        """Get the (string) contents of a config file, or None if the file
        did not exist. If required=True, raise an exception rather than
        returning None. Any leading or trailing whitespace will be stripped
        from the data."""
        fn = os.path.join(self.basedir, name)
        try:
            return fileutil.read(fn).strip()
        except EnvironmentError:
            if not required:
                return None
            raise

    def write_private_config(self, name, value):
        """Write the (string) contents of a private config file (which is a
        config file that resides within the subdirectory named 'private').
        """
        privname = os.path.join(self.basedir, "private", name)
        open(privname, "w").write(value)

    def get_private_config(self, name, default=_None):
        """Read the (string) contents of a private config file (which is a
        config file that resides within the subdirectory named 'private'),
        and return it. Return a default, or raise an error if one was not
        given.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            return fileutil.read(privname).strip()
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            return default

    def get_or_create_private_config(self, name, default=_None):
        """Try to get the (string) contents of a private config file (which
        is a config file that resides within the subdirectory named
        'private'), and return it. Any leading or trailing whitespace will be
        stripped from the data.

        If the file does not exist, and default is not given, report an error.
        If the file does not exist and a default is specified, try to create
        it using that default, and then return the value that was written.
        If 'default' is a string, use it as a default value. If not, treat it
        as a zero-argument callable that is expected to return a string.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            value = fileutil.read(privname)
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            if isinstance(default, basestring):
                value = default
            else:
                value = default()
            fileutil.write(privname, value)
        return value.strip()

    def write_config(self, name, value, mode="w"):
        """Write a string to a config file."""
        fn = os.path.join(self.basedir, name)
        try:
            fileutil.write(fn, value, mode)
        except EnvironmentError, e:
            self.log("Unable to write config file '%s'" % fn)
            self.log(e)
class Node(service.MultiService):
    """
    This class implements common functionality of both Client nodes and
    Introducer nodes.
    """
    NODETYPE = "unknown NODETYPE"
    CERTFILE = "node.pem"

    def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider):
        """
        Initialize the node with the given configuration. Its base directory
        is the current directory by default.
        """
        service.MultiService.__init__(self)

        self.config = config
        self.get_config = config.get_config # XXX stopgap
        self.nickname = config.nickname # XXX stopgap

        # this can go away once Client.init_client_storage_broker is moved
        # into create_client() (tests sometimes have None here)
        self._i2p_provider = i2p_provider
        self._tor_provider = tor_provider

        self.create_log_tub()
        self.logSource = "Node"
        self.setup_logging()

        self.tub = main_tub
        if self.tub is not None:
            self.nodeid = b32decode(self.tub.tubID.upper()) # binary format
            self.short_nodeid = b32encode(self.nodeid).lower()[:8] # for printing
            self.config.write_config_file("my_nodeid",
                                          b32encode(self.nodeid).lower() + b"\n",
                                          mode="wb")
            self.tub.setServiceParent(self)
        else:
            self.nodeid = self.short_nodeid = None

        self.control_tub = control_tub
        if self.control_tub is not None:
            self.control_tub.setServiceParent(self)

        self.log("Node constructed. " + __full_version__)
        iputil.increase_rlimits()

    def _is_tub_listening(self):
        """
        :returns: True if the main tub is listening
        """
        return len(self.tub.getListeners()) > 0

    # pull this outside of Node's __init__ too, see:
    # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2948
    def create_log_tub(self):
        # The logport uses a localhost-only ephemeral Tub, with no control
        # over the listening port or location. This might change if we
        # discover a compelling reason for it in the future (e.g. being able
        # to use "flogtool tail" against a remote server), but for now I
        # think we can live without it.
        self.log_tub = Tub()
        portnum = iputil.listenOnUnused(self.log_tub)
        self.log("Log Tub location set to 127.0.0.1:%s" % (portnum,))
        self.log_tub.setServiceParent(self)

    def startService(self):
        # Note: this class can be started and stopped at most once.
        self.log("Node.startService")
        # Record the process id in the twisted log, after startService()
        # (__init__ is called before fork(), but startService is called
        # after). Note that Foolscap logs handle pid-logging by itself, no
        # need to send a pid to the foolscap log here.
        twlog.msg("My pid: %s" % os.getpid())
        try:
            os.chmod("twistd.pid", 0o644)
        except EnvironmentError:
            pass

        service.MultiService.startService(self)
        self.log("%s running" % self.NODETYPE)
        twlog.msg("%s running" % self.NODETYPE)

    def stopService(self):
        self.log("Node.stopService")
        return service.MultiService.stopService(self)

    def shutdown(self):
        """Shut down the node. Returns a Deferred that fires (with None) when
        it finally stops kicking."""
        self.log("Node.shutdown")
        return self.stopService()

    def setup_logging(self):
        # we replace the formatTime() method of the log observer that
        # twistd set up for us, with a method that uses our preferred
        # timestamp format.
        for o in twlog.theLogPublisher.observers:
            # o might be a FileLogObserver's .emit method
            if type(o) is type(self.setup_logging): # bound method
                ob = o.__self__
                if isinstance(ob, twlog.FileLogObserver):
                    newmeth = types.MethodType(formatTimeTahoeStyle, ob)
                    ob.formatTime = newmeth
        # TODO: twisted >2.5.0 offers maxRotatedFiles=50

        lgfurl_file = self.config.get_private_path("logport.furl").encode(
            get_filesystem_encoding())
        if os.path.exists(lgfurl_file):
            os.remove(lgfurl_file)
        self.log_tub.setOption("logport-furlfile", lgfurl_file)
        lgfurl = self.config.get_config("node", "log_gatherer.furl", "")
        if lgfurl:
            # this is in addition to the contents of log-gatherer-furlfile
            lgfurl = lgfurl.encode("utf-8")
            self.log_tub.setOption("log-gatherer-furl", lgfurl)
        self.log_tub.setOption("log-gatherer-furlfile",
                               self.config.get_config_path("log_gatherer.furl"))

        incident_dir = self.config.get_config_path("logs", "incidents")
        foolscap.logging.log.setLogDir(incident_dir)
        twlog.msg("Foolscap logging initialized")
        twlog.msg("Note to developers: twistd.log does not receive very much.")
        twlog.msg("Use 'flogtool tail -c NODEDIR/private/logport.furl' instead")
        twlog.msg("and read docs/logging.rst")

    def log(self, *args, **kwargs):
        return log.msg(*args, **kwargs)
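A sketch of what the ephemeral localhost-only log Tub setup looks like, using foolscap's listenOn/getPortnum/setLocation calls. This approximates what `iputil.listenOnUnused` does; it is not that function's actual code:

from foolscap.api import Tub

def listen_on_unused(tub):
    # "tcp:0" asks the kernel for any free port; read back the allocation
    l = tub.listenOn("tcp:0:interface=127.0.0.1")
    portnum = l.getPortnum()
    tub.setLocation("127.0.0.1:%d" % portnum)
    return portnum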
class Node(service.MultiService):
    # this implements common functionality of both Client nodes and Introducer
    # nodes.
    NODETYPE = "unknown NODETYPE"
    PORTNUMFILE = None
    CERTFILE = "node.pem"
    GENERATED_FILES = []

    def __init__(self, basedir=u"."):
        service.MultiService.__init__(self)
        self.basedir = abspath_expanduser_unicode(unicode(basedir))
        self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE)
        self._tub_ready_observerlist = observer.OneShotObserverList()
        fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
        open(os.path.join(self.basedir, "private", "README"), "w").write(PRIV_README)

        # creates self.config
        self.read_config()
        nickname_utf8 = self.get_config("node", "nickname", "<unspecified>")
        self.nickname = nickname_utf8.decode("utf-8")
        assert type(self.nickname) is unicode

        self.init_tempdir()
        self.create_tub()
        self.logSource = "Node"

        self.setup_ssh()
        self.setup_logging()
        self.log("Node constructed. " + get_package_versions_string())
        iputil.increase_rlimits()

    def init_tempdir(self):
        local_tempdir_utf8 = "tmp" # default is NODEDIR/tmp/
        tempdir = self.get_config("node", "tempdir", local_tempdir_utf8).decode('utf-8')
        tempdir = os.path.join(self.basedir, tempdir)
        if not os.path.exists(tempdir):
            fileutil.make_dirs(tempdir)
        tempfile.tempdir = abspath_expanduser_unicode(tempdir)
        # this should cause twisted.web.http (which uses
        # tempfile.TemporaryFile) to put large request bodies in the given
        # directory. Without this, the default temp dir is usually /tmp/,
        # which is frequently too small.
        test_name = tempfile.mktemp()
        _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)

    @staticmethod
    def _contains_unescaped_hash(item):
        characters = iter(item)
        for c in characters:
            if c == '\\':
                characters.next()
            elif c == '#':
                return True
        return False

    def get_config(self, section, option, default=_None, boolean=False):
        try:
            if boolean:
                return self.config.getboolean(section, option)
            item = self.config.get(section, option)
            if option.endswith(".furl") and self._contains_unescaped_hash(item):
                raise UnescapedHashError(section, option, item)
            return item
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            if default is _None:
                fn = os.path.join(self.basedir, u"tahoe.cfg")
                raise MissingConfigEntry("%s is missing the [%s]%s entry"
                                         % (quote_output(fn), section, option))
            return default

    def set_config(self, section, option, value):
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config.set(section, option, value)
        assert self.config.get(section, option) == value

    def read_config(self):
        self.error_about_old_config_files()
        self.config = ConfigParser.SafeConfigParser()

        tahoe_cfg = os.path.join(self.basedir, "tahoe.cfg")
        try:
            f = open(tahoe_cfg, "rb")
            try:
                # Skip any initial Byte Order Mark. Since this is an ordinary
                # file, we don't need to handle incomplete reads, and can
                # assume seekability.
                if f.read(3) != '\xEF\xBB\xBF':
                    f.seek(0)
                self.config.readfp(f)
            finally:
                f.close()
        except EnvironmentError:
            if os.path.exists(tahoe_cfg):
                raise

        cfg_tubport = self.get_config("node", "tub.port", "")
        if not cfg_tubport:
            # For 'tub.port', tahoe.cfg overrides the individual file on
            # disk. So only read self._portnumfile if tahoe.cfg doesn't
            # provide a value.
            try:
                file_tubport = fileutil.read(self._portnumfile).strip()
                self.set_config("node", "tub.port", file_tubport)
            except EnvironmentError:
                if os.path.exists(self._portnumfile):
                    raise

    def error_about_old_config_files(self):
        """
        If any old configuration files are detected, raise OldConfigError.
        """
        oldfnames = set()
        for name in ['nickname', 'webport', 'keepalive_timeout',
                     'log_gatherer.furl', 'disconnect_timeout',
                     'advertised_ip_addresses', 'introducer.furl',
                     'helper.furl', 'key_generator.furl',
                     'stats_gatherer.furl', 'no_storage',
                     'readonly_storage', 'sizelimit',
                     'debug_discard_storage', 'run_helper']:
            if name not in self.GENERATED_FILES:
                fullfname = os.path.join(self.basedir, name)
                if os.path.exists(fullfname):
                    oldfnames.add(fullfname)
        if oldfnames:
            e = OldConfigError(oldfnames)
            twlog.msg(e)
            raise e

    def create_tub(self):
        certfile = os.path.join(self.basedir, "private", self.CERTFILE)
        self.tub = Tub(certFile=certfile)
        self.tub.setOption("logLocalFailures", True)
        self.tub.setOption("logRemoteFailures", True)
        self.tub.setOption("expose-remote-exception-types", False)

        # see #521 for a discussion of how to pick these timeout values.
        keepalive_timeout_s = self.get_config("node", "timeout.keepalive", "")
        if keepalive_timeout_s:
            self.tub.setOption("keepaliveTimeout", int(keepalive_timeout_s))
        disconnect_timeout_s = self.get_config("node", "timeout.disconnect", "")
        if disconnect_timeout_s:
            # N.B.: this is in seconds, so use "1800" to get 30min
            self.tub.setOption("disconnectTimeout", int(disconnect_timeout_s))

        self.nodeid = b32decode(self.tub.tubID.upper()) # binary format
        self.write_config("my_nodeid", b32encode(self.nodeid).lower() + "\n")
        self.short_nodeid = b32encode(self.nodeid).lower()[:8] # ready for printing

        tubport = self.get_config("node", "tub.port", "tcp:0")
        self.tub.listenOn(tubport)
        # we must wait until our service has started before we can find out
        # our IP address and thus do tub.setLocation, and we can't register
        # any services with the Tub until after that point
        self.tub.setServiceParent(self)

    def setup_ssh(self):
        ssh_port = self.get_config("node", "ssh.port", "")
        if ssh_port:
            ssh_keyfile = self.get_config("node", "ssh.authorized_keys_file").decode('utf-8')
            from allmydata import manhole
            m = manhole.AuthorizedKeysManhole(ssh_port,
                                              ssh_keyfile.encode(get_filesystem_encoding()))
            m.setServiceParent(self)
            self.log("AuthorizedKeysManhole listening on %s" % ssh_port)

    def get_app_versions(self):
        # TODO: merge this with allmydata.get_package_versions
        return dict(app_versions.versions)

    def get_config_from_file(self, name, required=False):
        """Get the (string) contents of a config file, or None if the file
        did not exist. If required=True, raise an exception rather than
        returning None. Any leading or trailing whitespace will be stripped
        from the data."""
        fn = os.path.join(self.basedir, name)
        try:
            return fileutil.read(fn).strip()
        except EnvironmentError:
            if not required:
                return None
            raise

    def write_private_config(self, name, value):
        """Write the (string) contents of a private config file (which is a
        config file that resides within the subdirectory named 'private').
        """
        privname = os.path.join(self.basedir, "private", name)
        open(privname, "w").write(value)

    def get_private_config(self, name, default=_None):
        """Read the (string) contents of a private config file (which is a
        config file that resides within the subdirectory named 'private'),
        and return it. Return a default, or raise an error if one was not
        given.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            return fileutil.read(privname)
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            return default

    def get_or_create_private_config(self, name, default=_None):
        """Try to get the (string) contents of a private config file (which
        is a config file that resides within the subdirectory named
        'private'), and return it. Any leading or trailing whitespace will be
        stripped from the data.

        If the file does not exist, and default is not given, report an error.
        If the file does not exist and a default is specified, try to create
        it using that default, and then return the value that was written.
        If 'default' is a string, use it as a default value. If not, treat it
        as a zero-argument callable that is expected to return a string.
        """
        privname = os.path.join(self.basedir, "private", name)
        try:
            value = fileutil.read(privname)
        except EnvironmentError:
            if os.path.exists(privname):
                raise
            if default is _None:
                raise MissingConfigEntry("The required configuration file %s is missing."
                                         % (quote_output(privname),))
            if isinstance(default, basestring):
                value = default
            else:
                value = default()
            fileutil.write(privname, value)
        return value.strip()

    def write_config(self, name, value, mode="w"):
        """Write a string to a config file."""
        fn = os.path.join(self.basedir, name)
        try:
            fileutil.write(fn, value, mode)
        except EnvironmentError, e:
            self.log("Unable to write config file '%s'" % fn)
            self.log(e)
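The BOM-skipping logic in read_config() above can be exercised standalone: a tahoe.cfg written by some Windows editors begins with the UTF-8 byte order mark '\xEF\xBB\xBF', which SafeConfigParser would otherwise choke on. A small sketch; `read_cfg` is a hypothetical helper, not project code:

import ConfigParser, StringIO

def read_cfg(f):
    config = ConfigParser.SafeConfigParser()
    # consume a leading UTF-8 BOM if present, otherwise rewind
    if f.read(3) != '\xEF\xBB\xBF':
        f.seek(0)
    config.readfp(f)
    return config

cfg = read_cfg(StringIO.StringIO('\xEF\xBB\xBF[node]\nnickname = demo\n'))
assert cfg.get("node", "nickname") == "demo"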
class SystemFramework(pollmixin.PollMixin):
    numnodes = 7

    def __init__(self, basedir, mode):
        self.basedir = basedir = abspath_expanduser_unicode(unicode(basedir))
        if not (basedir + os.path.sep).startswith(abspath_expanduser_unicode(u".") + os.path.sep):
            raise AssertionError("safety issue: basedir must be a subdir")
        self.testdir = testdir = os.path.join(basedir, "test")
        if os.path.exists(testdir):
            shutil.rmtree(testdir)
        fileutil.make_dirs(testdir)
        self.sparent = service.MultiService()
        self.sparent.startService()
        self.proc = None
        self.tub = Tub()
        self.tub.setOption("expose-remote-exception-types", False)
        self.tub.setServiceParent(self.sparent)
        self.mode = mode
        self.failed = False
        self.keepalive_file = None

    def run(self):
        framelog = os.path.join(self.basedir, "driver.log")
        log.startLogging(open(framelog, "a"), setStdout=False)
        log.msg("CHECK_MEMORY(mode=%s) STARTING" % self.mode)
        #logfile = open(os.path.join(self.testdir, "log"), "w")
        #flo = log.FileLogObserver(logfile)
        #log.startLoggingWithObserver(flo.emit, setStdout=False)
        d = fireEventually()
        d.addCallback(lambda res: self.setUp())
        d.addCallback(lambda res: self.record_initial_memusage())
        d.addCallback(lambda res: self.make_nodes())
        d.addCallback(lambda res: self.wait_for_client_connected())
        d.addCallback(lambda res: self.do_test())
        d.addBoth(self.tearDown)
        def _err(err):
            self.failed = err
            log.err(err)
            print err
        d.addErrback(_err)
        def _done(res):
            reactor.stop()
            return res
        d.addBoth(_done)
        reactor.run()
        if self.failed:
            # raiseException doesn't work for CopiedFailures
            self.failed.raiseException()

    def setUp(self):
        #print "STARTING"
        self.stats = {}
        self.statsfile = open(os.path.join(self.basedir, "stats.out"), "a")
        d = self.make_introducer()
        def _more(res):
            return self.start_client()
        d.addCallback(_more)
        def _record_control_furl(control_furl):
            self.control_furl = control_furl
            #print "OBTAINING '%s'" % (control_furl,)
            return self.tub.getReference(self.control_furl)
        d.addCallback(_record_control_furl)
        def _record_control(control_rref):
            self.control_rref = control_rref
        d.addCallback(_record_control)
        def _ready(res):
            #print "CLIENT READY"
            pass
        d.addCallback(_ready)
        return d

    def record_initial_memusage(self):
        print
        print "Client started (no connections yet)"
        d = self._print_usage()
        d.addCallback(self.stash_stats, "init")
        return d

    def wait_for_client_connected(self):
        print
        print "Client connecting to other nodes.."
        return self.control_rref.callRemote("wait_for_client_connections",
                                            self.numnodes+1)

    def tearDown(self, passthrough):
        # the client node will shut down in a few seconds
        #os.remove(os.path.join(self.clientdir, "suicide_prevention_hotline"))
        log.msg("shutting down SystemTest services")
        if self.keepalive_file and os.path.exists(self.keepalive_file):
            age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME]
            log.msg("keepalive file at shutdown was %ds old" % age)
        d = defer.succeed(None)
        if self.proc:
            d.addCallback(lambda res: self.kill_client())
        d.addCallback(lambda res: self.sparent.stopService())
        d.addCallback(lambda res: flushEventualQueue())
        def _close_statsfile(res):
            self.statsfile.close()
        d.addCallback(_close_statsfile)
        d.addCallback(lambda res: passthrough)
        return d

    def add_service(self, s):
        s.setServiceParent(self.sparent)
        return s

    def make_introducer(self):
        iv_basedir = os.path.join(self.testdir, "introducer")
        os.mkdir(iv_basedir)
        iv = introducer.IntroducerNode(basedir=iv_basedir)
        self.introducer = self.add_service(iv)
        d = self.introducer.when_tub_ready()
        def _introducer_ready(res):
            q = self.introducer
            self.introducer_furl = q.introducer_url
        d.addCallback(_introducer_ready)
        return d

    def make_nodes(self):
        self.nodes = []
        for i in range(self.numnodes):
            nodedir = os.path.join(self.testdir, "node%d" % i)
            os.mkdir(nodedir)
            f = open(os.path.join(nodedir, "tahoe.cfg"), "w")
            f.write("[client]\n"
                    "introducer.furl = %s\n"
                    "shares.happy = 1\n"
                    "[storage]\n"
                    % (self.introducer_furl,))
            # the only tests for which we want the internal nodes to actually
            # retain shares are the ones where somebody's going to download
            # them.
            if self.mode in ("download", "download-GET", "download-GET-slow"):
                # retain shares
                pass
            else:
                # for these tests, we tell the storage servers to pretend to
                # accept shares, but really just throw them out, since we're
                # only testing upload and not download.
                f.write("debug_discard = true\n")
            if self.mode in ("receive",):
                # for this mode, the client-under-test gets all the shares,
                # so our internal nodes can refuse requests
                f.write("readonly = true\n")
            f.close()
            c = self.add_service(client.Client(basedir=nodedir))
            self.nodes.append(c)
        # the peers will start running, eventually they will connect to each
        # other and the introducer

    def touch_keepalive(self):
        if os.path.exists(self.keepalive_file):
            age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME]
            log.msg("touching keepalive file, was %ds old" % age)
        f = open(self.keepalive_file, "w")
        f.write("""\
If the node notices this file at startup, it will poll every 5 seconds and
terminate if the file is more than 10 seconds old, or if it has been deleted.
If the test harness has an internal failure and neglects to kill off the node
itself, this helps to avoid leaving processes lying around. The contents of
this file are ignored.
""")
        f.close()

    def start_client(self):
        # this returns a Deferred that fires with the client's control.furl
        log.msg("MAKING CLIENT")
        # self.testdir is an absolute Unicode path
        clientdir = self.clientdir = os.path.join(self.testdir, u"client")
        clientdir_str = clientdir.encode(get_filesystem_encoding())
        quiet = StringIO()
        create_node.create_node({'basedir': clientdir}, out=quiet)
        log.msg("DONE MAKING CLIENT")
        # now replace tahoe.cfg
        # set webport=0 and then ask the node what port it picked.
        f = open(os.path.join(clientdir, "tahoe.cfg"), "w")
        f.write("[node]\n"
                "web.port = tcp:0:interface=127.0.0.1\n"
                "[client]\n"
                "introducer.furl = %s\n"
                "shares.happy = 1\n"
                "[storage]\n"
                % (self.introducer_furl,))
        if self.mode in ("upload-self", "receive"):
            # accept and store shares, to trigger the memory consumption bugs
            pass
        else:
            # don't accept any shares
            f.write("readonly = true\n")
            ## also, if we do receive any shares, throw them away
            #f.write("debug_discard = true")
        if self.mode == "upload-self":
            pass
        f.close()
        self.keepalive_file = os.path.join(clientdir,
                                           "suicide_prevention_hotline")
        # now start updating the mtime.
        self.touch_keepalive()
        ts = internet.TimerService(1.0, self.touch_keepalive)
        ts.setServiceParent(self.sparent)

        pp = ClientWatcher()
        self.proc_done = pp.d = defer.Deferred()
        logfile = os.path.join(self.basedir, "client.log")
        cmd = ["twistd", "-n", "-y", "tahoe-client.tac", "-l", logfile]
        env = os.environ.copy()
        self.proc = reactor.spawnProcess(pp, cmd[0], cmd, env, path=clientdir_str)
        log.msg("CLIENT STARTED")

        # now we wait for the client to get started. we're looking for the
        # control.furl file to appear.
        furl_file = os.path.join(clientdir, "private", "control.furl")
        url_file = os.path.join(clientdir, "node.url")
        def _check():
            if pp.ended and pp.ended.value.status != 0:
                # the twistd process ends normally (with rc=0) if the child
                # is successfully launched. It ends abnormally (with rc!=0)
                # if the child cannot be launched.
                raise ChildDidNotStartError("process ended while waiting for startup")
            return os.path.exists(furl_file)
        d = self.poll(_check, 0.1)

        # once it exists, wait a moment before we read from it, just in case
        # it hasn't finished writing the whole thing. Ideally control.furl
        # would be created in some atomic fashion, or made non-readable until
        # it's ready, but I can't think of an easy way to do that, and I
        # think the chances that we'll observe a half-write are pretty low.
        def _stall(res):
            d2 = defer.Deferred()
            reactor.callLater(0.1, d2.callback, None)
            return d2
        d.addCallback(_stall)
        def _read(res):
            # read the node's URL
            self.webish_url = open(url_file, "r").read().strip()
            if self.webish_url[-1] == "/":
                # trim trailing slash, since the rest of the code wants it gone
                self.webish_url = self.webish_url[:-1]
            f = open(furl_file, "r")
            furl = f.read()
            f.close()
            return furl.strip()
        d.addCallback(_read)
        return d

    def kill_client(self):
        # returns a Deferred that fires when the process exits. This may only
        # be called once.
        try:
            self.proc.signalProcess("INT")
        except error.ProcessExitedAlready:
            pass
        return self.proc_done

    def create_data(self, name, size):
        filename = os.path.join(self.testdir, name + ".data")
        f = open(filename, "wb")
        block = "a" * 8192
        while size > 0:
            l = min(size, 8192)
            f.write(block[:l])
            size -= l
        f.close()
        return filename

    def stash_stats(self, stats, name):
        self.statsfile.write("%s %s: %d\n" % (self.mode, name, stats['VmPeak']))
        self.statsfile.flush()
        self.stats[name] = stats['VmPeak']

    def POST(self, urlpath, **fields):
        url = self.webish_url + urlpath
        sepbase = "boogabooga"
        sep = "--" + sepbase
        form = []
        form.append(sep)
        form.append('Content-Disposition: form-data; name="_charset"')
        form.append('')
        form.append('UTF-8')
        form.append(sep)
        for name, value in fields.iteritems():
            if isinstance(value, tuple):
                filename, value = value
                form.append('Content-Disposition: form-data; name="%s"; '
                            'filename="%s"' % (name, filename))
            else:
                form.append('Content-Disposition: form-data; name="%s"' % name)
            form.append('')
            form.append(value)
            form.append(sep)
        form[-1] += "--"
        body = "\r\n".join(form) + "\r\n"
        headers = {"content-type": "multipart/form-data; boundary=%s" % sepbase,
                   }
        return tw_client.getPage(url, method="POST", postdata=body,
                                 headers=headers, followRedirect=False)

    def GET_discard(self, urlpath, stall):
        url = self.webish_url + urlpath + "?filename=dummy-get.out"
        return discardPage(url, stall)

    def _print_usage(self, res=None):
        d = self.control_rref.callRemote("get_memory_usage")
        def _print(stats):
            print "VmSize: %9d VmPeak: %9d" % (stats["VmSize"], stats["VmPeak"])
            return stats
        d.addCallback(_print)
        return d

    def _do_upload(self, res, size, files, uris):
        name = '%d' % size
        print
        print "uploading %s" % name
        if self.mode in ("upload", "upload-self"):
            files[name] = self.create_data(name, size)
            d = self.control_rref.callRemote("upload_from_file_to_uri",
                                             files[name].encode("utf-8"),
                                             convergence="check-memory")
            def _done(uri):
                os.remove(files[name])
                del files[name]
                return uri
            d.addCallback(_done)
        elif self.mode == "upload-POST":
            data = "a" * size
            url = "/uri"
            d = self.POST(url, t="upload", file=("%d.data" % size, data))
        elif self.mode in ("receive",
                           "download", "download-GET", "download-GET-slow"):
            # mode=receive: upload the data from a local peer, so that the
            # client-under-test receives and stores the shares
            #
            # mode=download*: upload the data from a local peer, then have
            # the client-under-test download it.
            #
            # we need to wait until the uploading node has connected to all
            # peers, since the wait_for_client_connections() above doesn't
            # pay attention to our self.nodes[] and their connections.
            files[name] = self.create_data(name, size)
            u = self.nodes[0].getServiceNamed("uploader")
            d = self.nodes[0].debug_wait_for_client_connections(self.numnodes+1)
            d.addCallback(lambda res:
                          u.upload(upload.FileName(files[name],
                                                   convergence="check-memory")))
            d.addCallback(lambda results: results.uri)
        else:
            raise ValueError("unknown mode=%s" % self.mode)
        def _complete(uri):
            uris[name] = uri
            print "uploaded %s" % name
        d.addCallback(_complete)
        return d

    def _do_download(self, res, size, uris):
        if self.mode not in ("download", "download-GET", "download-GET-slow"):
            return
        name = '%d' % size
        print "downloading %s" % name
        uri = uris[name]

        if self.mode == "download":
            d = self.control_rref.callRemote("download_from_uri_to_file",
                                             uri, "dummy.out")
        elif self.mode == "download-GET":
            url = "/uri/%s" % uri
            d = self.GET_discard(urllib.quote(url), stall=False)
        elif self.mode == "download-GET-slow":
            url = "/uri/%s" % uri
            d = self.GET_discard(urllib.quote(url), stall=True)

        def _complete(res):
            print "downloaded %s" % name
            return res
        d.addCallback(_complete)
        return d

    def do_test(self):
        #print "CLIENT STARTED"
        #print "FURL", self.control_furl
        #print "RREF", self.control_rref
        #print
        kB = 1000; MB = 1000*1000
        files = {}
        uris = {}

        d = self._print_usage()
        d.addCallback(self.stash_stats, "0B")

        for i in range(10):
            d.addCallback(self._do_upload, 10*kB+i, files, uris)
            d.addCallback(self._do_download, 10*kB+i, uris)
            d.addCallback(self._print_usage)
        d.addCallback(self.stash_stats, "10kB")

        for i in range(3):
            d.addCallback(self._do_upload, 10*MB+i, files, uris)
            d.addCallback(self._do_download, 10*MB+i, uris)
            d.addCallback(self._print_usage)
        d.addCallback(self.stash_stats, "10MB")

        for i in range(1):
            d.addCallback(self._do_upload, 50*MB+i, files, uris)
            d.addCallback(self._do_download, 50*MB+i, uris)
            d.addCallback(self._print_usage)
        d.addCallback(self.stash_stats, "50MB")

        #for i in range(1):
        #    d.addCallback(self._do_upload, 100*MB+i, files, uris)
        #    d.addCallback(self._do_download, 100*MB+i, uris)
        #    d.addCallback(self._print_usage)
        #d.addCallback(self.stash_stats, "100MB")

        #d.addCallback(self.stall)
        def _done(res):
            print "FINISHING"
        d.addCallback(_done)
        return d

    def stall(self, res):
        d = defer.Deferred()
        reactor.callLater(5, d.callback, None)
        return d
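The harness above assumes a ClientWatcher process protocol exposing a `.d` Deferred (assigned externally as `self.proc_done = pp.d`) and an `.ended` attribute holding the exit Failure. A minimal sketch of such a watcher, under those assumptions rather than the harness's actual class:

from twisted.internet import protocol

class ClientWatcher(protocol.ProcessProtocol):
    ended = None
    d = None
    def outReceived(self, data):
        pass # stdout is uninteresting; twistd logs to client.log instead
    def errReceived(self, data):
        pass
    def processEnded(self, reason):
        # reason is a Failure wrapping ProcessDone or ProcessTerminated,
        # so reason.value.status carries the exit status checked in _check()
        self.ended = reason
        if self.d:
            self.d.callback(None)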
class NativeStorageServer(service.MultiService):
    """I hold information about a storage server that we want to connect to.
    If we are connected, I hold the RemoteReference, their host address, and
    their version information. I remember information about when we were
    last connected to them, even if we aren't currently connected.

    @ivar last_connect_time: when we last established a connection
    @ivar last_loss_time: when we last lost a connection

    @ivar version: the server's versiondict, from the most recent announcement
    @ivar nickname: the server's self-reported nickname (unicode)

    @ivar rref: the RemoteReference, if connected, otherwise None
    @ivar remote_host: the IAddress, if connected, otherwise None
    """
    implements(IServer)

    VERSION_DEFAULTS = {
        "http://allmydata.org/tahoe/protocols/storage/v1":
        { "maximum-immutable-share-size": 2**32 - 1,
          "maximum-mutable-share-size": 2*1000*1000*1000, # maximum prior to v1.9.2
          "tolerates-immutable-read-overrun": False,
          "delete-mutable-shares-with-zero-length-writev": False,
          "available-space": None,
          },
        "application-version": "unknown: no get_version()",
        }

    def __init__(self, key_s, ann, tub_options={}, tub_handlers={}):
        service.MultiService.__init__(self)
        self.key_s = key_s
        self.announcement = ann
        self._tub_options = tub_options
        self._tub_handlers = tub_handlers

        assert "anonymous-storage-FURL" in ann, ann
        furl = str(ann["anonymous-storage-FURL"])
        m = re.match(r'pb://(\w+)@', furl)
        assert m, furl
        tubid_s = m.group(1).lower()
        self._tubid = base32.a2b(tubid_s)
        assert "permutation-seed-base32" in ann, ann
        ps = base32.a2b(str(ann["permutation-seed-base32"]))
        self._permutation_seed = ps

        if key_s:
            self._long_description = key_s
            if key_s.startswith("v0-"):
                # remove v0- prefix from abbreviated name
                self._short_description = key_s[3:3+8]
            else:
                self._short_description = key_s[:8]
        else:
            self._long_description = tubid_s
            self._short_description = tubid_s[:6]

        self.last_connect_time = None
        self.last_loss_time = None
        self.remote_host = None
        self.rref = None
        self._is_connected = False
        self._reconnector = None
        self._trigger_cb = None
        self._on_status_changed = ObserverList()

    def on_status_changed(self, status_changed):
        """
        :param status_changed: a callable taking a single arg (the
            NativeStorageServer) that is notified when we become connected
        """
        return self._on_status_changed.subscribe(status_changed)

    # Special methods used by copy.copy() and copy.deepcopy(). When those are
    # used in allmydata.immutable.filenode to copy CheckResults during
    # repair, we want it to treat the IServer instances as singletons, and
    # not attempt to duplicate them.
    def __copy__(self):
        return self
    def __deepcopy__(self, memodict):
        return self

    def __repr__(self):
        return "<NativeStorageServer for %s>" % self.get_name()
    def get_serverid(self):
        return self._tubid # XXX replace with self.key_s
    def get_permutation_seed(self):
        return self._permutation_seed
    def get_version(self):
        if self.rref:
            return self.rref.version
        return None
    def get_name(self): # keep methodname short
        # TODO: decide who adds [] in the short description. It should
        # probably be the output side, not here.
        return self._short_description
    def get_longname(self):
        return self._long_description
    def get_lease_seed(self):
        return self._tubid
    def get_foolscap_write_enabler_seed(self):
        return self._tubid
    def get_nickname(self):
        return self.announcement["nickname"]

    def get_announcement(self):
        return self.announcement
    def get_remote_host(self):
        return self.remote_host
    def is_connected(self):
        return self._is_connected
    def get_last_connect_time(self):
        return self.last_connect_time
    def get_last_loss_time(self):
        return self.last_loss_time
    def get_last_received_data_time(self):
        if self.rref is None:
            return None
        return self.rref.getDataLastReceivedAt()

    def get_available_space(self):
        version = self.get_version()
        if version is None:
            return None
        protocol_v1_version = version.get(
            'http://allmydata.org/tahoe/protocols/storage/v1', {})
        available_space = protocol_v1_version.get('available-space')
        if available_space is None:
            available_space = protocol_v1_version.get(
                'maximum-immutable-share-size', None)
        return available_space

    def start_connecting(self, trigger_cb):
        self._tub = Tub()
        for (name, value) in self._tub_options.items():
            self._tub.setOption(name, value)
        # XXX todo: set tub handlers
        self._tub.setServiceParent(self)
        furl = str(self.announcement["anonymous-storage-FURL"])
        self._trigger_cb = trigger_cb
        self._reconnector = self._tub.connectTo(furl, self._got_connection)

    def _got_connection(self, rref):
        lp = log.msg(format="got connection to %(name)s, getting versions",
                     name=self.get_name(),
                     facility="tahoe.storage_broker", umid="coUECQ")
        if self._trigger_cb:
            eventually(self._trigger_cb)
        default = self.VERSION_DEFAULTS
        d = add_version_to_remote_reference(rref, default)
        d.addCallback(self._got_versioned_service, lp)
        d.addCallback(lambda ign: self._on_status_changed.notify(self))
        d.addErrback(log.err, format="storageclient._got_connection",
                     name=self.get_name(), umid="Sdq3pg")

    def _got_versioned_service(self, rref, lp):
        log.msg(format="%(name)s provided version info %(version)s",
                name=self.get_name(), version=rref.version,
                facility="tahoe.storage_broker", umid="SWmJYg",
                level=log.NOISY, parent=lp)
        self.last_connect_time = time.time()
        self.remote_host = rref.getPeer()
        self.rref = rref
        self._is_connected = True
        rref.notifyOnDisconnect(self._lost)

    def get_rref(self):
        return self.rref

    def _lost(self):
        log.msg(format="lost connection to %(name)s", name=self.get_name(),
                facility="tahoe.storage_broker", umid="zbRllw")
        self.last_loss_time = time.time()
        # self.rref is now stale: all callRemote()s will get a
        # DeadReferenceError. We leave the stale reference in place so that
        # uploader/downloader code (which received this IServer through
        # get_connected_servers() or get_servers_for_psi()) can continue to
        # use s.get_rref().callRemote() and not worry about it being None.
        self._is_connected = False
        self.remote_host = None

    def stop_connecting(self):
        # used when this descriptor has been superseded by another
        self._reconnector.stopConnecting()

    def try_to_connect(self):
        # used when the broker wants us to hurry up
        self._reconnector.reset()
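A usage sketch for the reconnection half above: foolscap's Tub.connectTo returns a Reconnector that keeps redialing the furl until stopConnecting() is called, and reset() asks it to retry immediately. The furl below is a placeholder, not a real server:

from foolscap.api import Tub

tub = Tub()
tub.startService()

def _connected(rref):
    # called on every (re)connection with a fresh RemoteReference
    print "connected:", rref

reconnector = tub.connectTo("pb://tubid@example.net:12345/swissnum", _connected)
# later, when this server descriptor is superseded:
reconnector.stopConnecting()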