def test_reloadable(self):
    """
    A client can be started, shut down, and then started again from the
    same basedir (i.e. the node releases its resources on shutdown).
    """
    basedir = "test_client.Run.test_reloadable"
    os.mkdir(basedir)
    dummy = "pb://[email protected]:58889/bogus"
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
    c1 = yield client.create_client(basedir)
    c1.setServiceParent(self.sparent)
    # delay to let the service start up completely. I'm not entirely sure
    # this is necessary.
    yield self.stall(delay=2.0)
    yield c1.disownServiceParent()
    # the cygwin buildslave seems to need more time to let the old
    # service completely shut down. When delay=0.1, I saw this test fail,
    # probably due to the logport trying to reclaim the old socket
    # number. This suggests that either we're dropping a Deferred
    # somewhere in the shutdown sequence, or that cygwin is just cranky.
    yield self.stall(delay=2.0)
    # TODO: pause for slightly over one second, to let
    # Client._check_exit_trigger poll the file once. That will exercise
    # another few lines. Then add another test in which we don't
    # update the file at all, and watch to see the node shutdown.
    # (To do this, use a modified node which overrides Node.shutdown(),
    # also change _check_exit_trigger to use it instead of a raw
    # reactor.stop, also instrument the shutdown event in an
    # attribute that we can check.)
    c2 = yield client.create_client(basedir)
    c2.setServiceParent(self.sparent)
    yield c2.disownServiceParent()
def test_comment(self):
    """
    An unescaped comment character (#) in a furl results in an
    UnescapedHashError Failure.
    """
    # Raw strings: r"test\\#test" contains a literal backslash-backslash
    # before '#', so the hash still counts as unescaped; r"test\#test"
    # escapes it.
    should_fail = [r"test#test", r"#testtest", r"test\\#test"]
    should_not_fail = [r"test\#test", r"test\\\#test", r"testtest"]
    basedir = "test_client.Basic.test_comment"
    os.mkdir(basedir)
    def write_config(s):
        # Rewrite tahoe.cfg with the candidate introducer furl.
        config = ("[client]\n" "introducer.furl = %s\n" % s)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)
    for s in should_fail:
        self.failUnless(_Config._contains_unescaped_hash(s))
        write_config(s)
        with self.assertRaises(UnescapedHashError) as ctx:
            yield client.create_client(basedir)
        # The error message must name the offending config item.
        self.assertIn("[client]introducer.furl", str(ctx.exception))
    for s in should_not_fail:
        self.failIf(_Config._contains_unescaped_hash(s))
        write_config(s)
        yield client.create_client(basedir)
def test_loadable(self):
    """A basedir containing an exit-trigger file still yields a client."""
    basedir = "test_client.Run.test_loadable"
    os.mkdir(basedir)
    introducer_furl = "pb://[email protected]:58889/bogus"
    config_path = os.path.join(basedir, "tahoe.cfg")
    fileutil.write(config_path, BASECONFIG_I % introducer_furl)
    trigger_path = os.path.join(basedir, client._Client.EXIT_TRIGGER_FILE)
    fileutil.write(trigger_path, "")
    client.create_client(basedir)
def _check(config, expected_furl):
    # Nested helper: writes `config` appended to BASECONFIG into tahoe.cfg,
    # creates a client, and verifies the uploader's helper furl.
    # NOTE(review): relies on `basedir` and `self` from an enclosing test
    # method that is not visible in this chunk -- TODO confirm.
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG + config)
    c = client.create_client(basedir)
    uploader = c.getServiceNamed("uploader")
    furl, connected = uploader.get_helper_info()
    self.failUnlessEqual(furl, expected_furl)
def test_nodekey_no_storage(self):
    """Even with storage disabled, the client has a v0- node key."""
    basedir = "test_client.Basic.test_nodekey_no_storage"
    os.mkdir(basedir)
    config = BASECONFIG + "[storage]\n" + "enabled = false\n"
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)
    c = client.create_client(basedir)
    self.failUnless(c.get_long_nodeid().startswith("v0-"))
def test_invalid_equals_yaml(self):
    """A '='-style introducers.yaml makes create_client fail with TypeError."""
    self.yaml_path.setContent(EQUALS_YAML)
    with self.assertRaises(TypeError) as ctx:
        yield create_client(self.basedir)
    self.assertEquals(str(ctx.exception), "string indices must be integers")
def test_ok(self):
    """A single introducer in introducers.yaml shows up in introducer_furls."""
    cfg = {'introducers': {u'one': {'furl': 'furl1'}}}
    self.yaml_path.setContent(yamlutil.safe_dump(cfg))
    myclient = create_client(self.basedir)
    self.assertEquals(myclient.introducer_furls[0], 'furl1')
def test_disabled(self):
    """With listening and storage both disabled, the tub has no listeners."""
    basedir = "test_node/test_disabled"
    fileutil.make_dirs(basedir)
    with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f:
        f.write(BASE_CONFIG)
        f.write(NOLISTEN)
        f.write(DISABLE_STORAGE)
    n = create_client(basedir)
    self.assertEqual(n.tub.getListeners(), [])
def test_reserved_2(self):
    """reserved_space understands the 'K' suffix (10K == 10,000 bytes)."""
    basedir = "client.Basic.test_reserved_2"
    os.mkdir(basedir)
    config = (BASECONFIG +
              "[storage]\n" +
              "enabled = true\n" +
              "reserved_space = 10K\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)
    c = client.create_client(basedir)
    self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)
def test_nodekey_yes_storage(self):
    """
    We have a nodeid if we're providing storage
    """
    basedir = "test_client.Basic.test_nodekey_yes_storage"
    os.mkdir(basedir)
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
    created = yield client.create_client(basedir)
    nodeid = created.get_long_nodeid()
    self.failUnless(nodeid.startswith("v0-"))
def test_introducer_count_commented(self):
    """
    A commented-out entry in private/introducers is skipped: only the
    two live entries produce introducer clients.
    """
    self.yaml_path.setContent(INTRODUCERS_CFG_FURLS_COMMENTED)
    myclient = create_client(self.basedir)
    self.failUnlessEqual(len(myclient.introducer_clients), 2)
def test_ftp_auth_url(self):
    """An ftpd section with accounts.url can be instantiated."""
    basedir = u"client.Basic.test_ftp_auth_url"
    os.mkdir(basedir)
    cfg = (BASECONFIG +
           "[ftpd]\n"
           "enabled = true\n"
           "port = tcp:0:interface=127.0.0.1\n"
           "accounts.url = http://0.0.0.0/\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
    c = client.create_client(basedir)
    # just make sure it can be instantiated
    del c
def _restart(res):
    # Callback helper: start a second client from the same basedir and
    # immediately detach it, verifying the basedir is re-usable.
    # NOTE(review): relies on `basedir` and `self` from an enclosing
    # scope not visible in this chunk -- TODO confirm.
    # TODO: pause for slightly over one second, to let
    # Client._check_exit_trigger poll the file once. That will exercise
    # another few lines. Then add another test in which we don't
    # update the file at all, and watch to see the node shutdown.
    # (To do this, use a modified node which overrides Node.shutdown(),
    # also change _check_exit_trigger to use it instead of a raw
    # reactor.stop, also instrument the shutdown event in an
    # attribute that we can check.)
    c2 = client.create_client(basedir)
    c2.setServiceParent(self.sparent)
    return c2.disownServiceParent()
def test_secrets(self):
    """A new client writes private/secret and exposes renew/cancel secrets."""
    basedir = "test_client.Basic.test_secrets"
    os.mkdir(basedir)
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
    c = client.create_client(basedir)
    secret_fname = os.path.join(basedir, "private", "secret")
    self.failUnless(os.path.exists(secret_fname), secret_fname)
    for secret in (c.get_renewal_secret(), c.get_cancel_secret()):
        self.failUnless(base32.b2a(secret))
def test_create_client_invalid_config(self):
    """create_client rejects a tahoe.cfg containing an unknown section."""
    cfg_path = os.path.join(self.basedir, 'tahoe.cfg')
    with open(cfg_path, 'w') as f:
        f.write('[invalid section]\n' 'foo = bar\n')
    with self.assertRaises(UnknownConfigError) as ctx:
        yield client.create_client(self.basedir)
    # the error must identify the bad section
    self.assertIn("invalid section", str(ctx.exception))
def test_logdir_is_str(self):
    """
    foolscap's setLogDir must be called with a native `str` logdir
    (not bytes/unicode of the wrong flavor).
    """
    basedir = "test_node/test_logdir_is_str"
    # Namespace gives the nested function a mutable flag to record the call.
    ns = Namespace()
    ns.called = False
    def call_setLogDir(logdir):
        # stand-in for foolscap.logging.log.setLogDir: record + type-check
        ns.called = True
        self.failUnless(isinstance(logdir, str), logdir)
    self.patch(foolscap.logging.log, 'setLogDir', call_setLogDir)
    create_node_dir(basedir, "nothing to see here")
    yield client.create_client(basedir)
    # client creation must have configured the foolscap log directory
    self.failUnless(ns.called)
def test_reserved_bad(self):
    """
    reserved_space option produces errors on non-numbers
    """
    basedir = "client.Basic.test_reserved_bad"
    os.mkdir(basedir)
    cfg = (BASECONFIG +
           "[storage]\n" +
           "enabled = true\n" +
           "reserved_space = bogus\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
    with self.assertRaises(ValueError):
        yield client.create_client(basedir)
def test_ftp_auth_no_accountfile_or_url(self):
    """
    ftpd requires some way to look up accounts
    """
    basedir = u"client.Basic.test_ftp_auth_no_accountfile_or_url"
    os.mkdir(basedir)
    # an [ftpd] section with neither accounts.file nor accounts.url
    cfg = (BASECONFIG +
           "[ftpd]\n"
           "enabled = true\n"
           "port = tcp:0:interface=127.0.0.1\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
    with self.assertRaises(NeedRootcapLookupScheme):
        yield client.create_client(basedir)
def test_web_staticdir(self):
    """A relative web.static path is resolved against the basedir."""
    basedir = u"client.Basic.test_web_staticdir"
    os.mkdir(basedir)
    cfg = (BASECONFIG +
           "[node]\n" +
           "web.port = tcp:0:interface=127.0.0.1\n" +
           "web.static = relative\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
    c = client.create_client(basedir)
    w = c.getServiceNamed("webish")
    abs_basedir = fileutil.abspath_expanduser_unicode(basedir)
    expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir)
    self.failUnlessReallyEqual(w.staticdir, expected)
def test_ftp_auth_keyfile(self):
    """An ftpd section with accounts.file can be instantiated."""
    basedir = u"client.Basic.test_ftp_auth_keyfile"
    os.mkdir(basedir)
    cfg = (BASECONFIG +
           "[ftpd]\n"
           "enabled = true\n"
           "port = tcp:0:interface=127.0.0.1\n"
           "accounts.file = private/accounts\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
    os.mkdir(os.path.join(basedir, "private"))
    fileutil.write(os.path.join(basedir, "private", "accounts"), "\n")
    c = client.create_client(basedir)
    # just make sure it can be instantiated
    del c
def test_reject_default_in_yaml(self):
    """An introducer named 'default' in introducers.yaml is rejected."""
    cfg = {'introducers': {u'default': {'furl': 'furl1'}}}
    self.yaml_path.setContent(yamlutil.safe_dump(cfg))
    with self.assertRaises(ValueError) as ctx:
        yield create_client(self.basedir)
    self.assertEquals(
        str(ctx.exception),
        "'default' introducer furl cannot be specified in introducers.yaml; please "
        "fix impossible configuration.",
    )
def test_reserved_1(self):
    """
    reserved_space option is propagated
    """
    basedir = "client.Basic.test_reserved_1"
    os.mkdir(basedir)
    cfg = (BASECONFIG +
           "[storage]\n" +
           "enabled = true\n" +
           "reserved_space = 1000\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
    c = yield client.create_client(basedir)
    self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)
def test_comment(self):
    """An unescaped '#' in introducer.furl must raise UnescapedHashError."""
    # r"test\\#test": literal double backslash before '#' leaves it unescaped;
    # r"test\#test" escapes it.
    should_fail = [r"test#test", r"#testtest", r"test\\#test"]
    should_not_fail = [r"test\#test", r"test\\\#test", r"testtest"]
    basedir = "test_client.Basic.test_comment"
    os.mkdir(basedir)
    def write_config(s):
        # Rewrite tahoe.cfg with the candidate introducer furl.
        config = ("[client]\n" "introducer.furl = %s\n" % s)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)
    for s in should_fail:
        self.failUnless(_Config._contains_unescaped_hash(s))
        write_config(s)
        e = self.assertRaises(UnescapedHashError, client.create_client, basedir)
        # the error must name the offending config item
        self.assertIn("[client]introducer.furl", str(e))
    for s in should_not_fail:
        self.failIf(_Config._contains_unescaped_hash(s))
        write_config(s)
        client.create_client(basedir)
def test_disabled_but_storage(self):
    """Enabling storage while the tub is not listening is a config error."""
    basedir = "test_node/test_disabled_but_storage"
    create_node_dir(basedir, "testing")
    with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f:
        f.write(BASE_CONFIG)
        f.write(NOLISTEN)
        f.write(ENABLE_STORAGE)
    with self.assertRaises(ValueError) as ctx:
        yield client.create_client(basedir)
    self.assertIn(
        "storage is enabled, but tub is not listening",
        str(ctx.exception),
    )
def test_reserved_4(self):
    """
    reserved_space option understands 'Gb' to mean gigabytes
    """
    basedir = "client.Basic.test_reserved_4"
    os.mkdir(basedir)
    cfg = (BASECONFIG +
           "[storage]\n" +
           "enabled = true\n" +
           "reserved_space = 78Gb\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), cfg)
    c = yield client.create_client(basedir)
    self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                         78*1000*1000*1000)
def test_ftp_auth_url(self):
    """
    ftpd accounts.url is parsed properly
    """
    basedir = u"client.Basic.test_ftp_auth_url"
    os.mkdir(basedir)
    ftpd_section = ("[ftpd]\n"
                    "enabled = true\n"
                    "port = tcp:0:interface=127.0.0.1\n"
                    "accounts.url = http://0.0.0.0/\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                   (BASECONFIG + ftpd_section))
    c = yield client.create_client(basedir)
    # just make sure it can be instantiated
    del c
def test_reserved_4(self):
    """
    reserved_space option understands 'Gb' to mean gigabytes
    """
    basedir = "client.Basic.test_reserved_4"
    os.mkdir(basedir)
    storage_section = ("[storage]\n" +
                       "enabled = true\n" +
                       "reserved_space = 78Gb\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                   BASECONFIG + storage_section)
    c = yield client.create_client(basedir)
    expected = 78*1000*1000*1000
    self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, expected)
def test_disabled_but_storage(self):
    """Storage enabled without a listening tub must fail with ValueError."""
    basedir = "test_node/test_disabled_but_storage"
    create_node_dir(basedir, "testing")
    config = BASE_CONFIG + NOLISTEN + ENABLE_STORAGE
    with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f:
        f.write(config)
    with self.assertRaises(ValueError) as ctx:
        yield client.create_client(basedir)
    self.assertIn("storage is enabled, but tub is not listening",
                  str(ctx.exception))
def _set_up_client_nodes(self):
    """
    Create and start `self.numclients` client nodes against the shared
    introducer, recording the helper furl from client 0 and (when there
    are at least 4 clients) pointing client 3 at that helper.
    """
    q = self.introducer
    self.introducer_furl = q.introducer_url
    self.clients = []
    basedirs = []
    for i in range(self.numclients):
        basedirs.append((yield self._set_up_client_node(i)))
    # start clients[0], wait for it's tub to be ready (at which point it
    # will have registered the helper furl).
    c = yield client.create_client(basedirs[0])
    c.setServiceParent(self.sparent)
    self.clients.append(c)
    with open(os.path.join(basedirs[0], "private", "helper.furl"), "r") as f:
        helper_furl = f.read()
    self.helper_furl = helper_furl
    if self.numclients >= 4:
        # client 3 is configured to use client 0 as its upload helper
        with open(os.path.join(basedirs[3], 'tahoe.cfg'), 'a+') as f:
            f.write("[client]\n" "helper.furl = {}\n".format(helper_furl))
    # this starts the rest of the clients
    for i in range(1, self.numclients):
        c = yield client.create_client(basedirs[i])
        c.setServiceParent(self.sparent)
        self.clients.append(c)
    log.msg("STARTING")
    yield self.wait_for_connections()
    log.msg("CONNECTED")
    # now find out where the web port was
    self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
    if self.numclients >= 4:
        # and the helper-using webport
        self.helper_webish_url = self.clients[3].getServiceNamed(
            "webish").getURL()
def test_client(self):
    """
    Each publish bumps the persisted announcement sequence number, and
    re-publishing one service re-announces every service at the new
    seqnum with a fresh nonce.
    """
    basedir = "introducer/ClientSeqnums/test_client"
    fileutil.make_dirs(basedir)
    # if storage is enabled, the Client will publish its storage server
    # during startup (although the announcement will wait in a queue
    # until the introducer connection is established). To avoid getting
    # confused by this, disable storage.
    f = open(os.path.join(basedir, "tahoe.cfg"), "w")
    f.write("[client]\n")
    f.write("introducer.furl = nope\n")
    f.write("[storage]\n")
    f.write("enabled = false\n")
    f.close()
    c = yield create_client(basedir)
    ic = c.introducer_clients[0]
    # peek at the introducer client's internal announcement state
    outbound = ic._outbound_announcements
    published = ic._published_announcements
    def read_seqnum():
        # the seqnum is persisted to basedir/announcement-seqnum
        f = open(os.path.join(basedir, "announcement-seqnum"))
        seqnum = f.read().strip()
        f.close()
        return int(seqnum)
    ic.publish("sA", {"key": "value1"}, c._node_private_key)
    self.failUnlessEqual(read_seqnum(), 1)
    self.failUnless("sA" in outbound)
    self.failUnlessEqual(outbound["sA"]["seqnum"], 1)
    nonce1 = outbound["sA"]["nonce"]
    self.failUnless(isinstance(nonce1, str))
    self.failUnlessEqual(json.loads(published["sA"][0]),
                         outbound["sA"])
    # [1] is the signature, [2] is the pubkey
    # publishing a second service causes both services to be
    # re-published, with the next higher sequence number
    ic.publish("sB", {"key": "value2"}, c._node_private_key)
    self.failUnlessEqual(read_seqnum(), 2)
    self.failUnless("sB" in outbound)
    self.failUnlessEqual(outbound["sB"]["seqnum"], 2)
    self.failUnless("sA" in outbound)
    self.failUnlessEqual(outbound["sA"]["seqnum"], 2)
    nonce2 = outbound["sA"]["nonce"]
    self.failUnless(isinstance(nonce2, str))
    # re-publishing must generate a fresh nonce
    self.failIfEqual(nonce1, nonce2)
    self.failUnlessEqual(json.loads(published["sA"][0]),
                         outbound["sA"])
    self.failUnlessEqual(json.loads(published["sB"][0]),
                         outbound["sB"])
def test_client(self):
    """
    Each publish bumps the persisted announcement sequence number;
    re-publishing one service re-announces all services at the new
    seqnum with a fresh (bytes) nonce.
    """
    basedir = FilePath("introducer/ClientSeqnums/test_client")
    private = basedir.child("private")
    private.makedirs()
    write_introducer(basedir, "default", "nope")
    # if storage is enabled, the Client will publish its storage server
    # during startup (although the announcement will wait in a queue
    # until the introducer connection is established). To avoid getting
    # confused by this, disable storage.
    with basedir.child("tahoe.cfg").open("w") as f:
        f.write(b"[storage]\n")
        f.write(b"enabled = false\n")
    c = yield create_client(basedir.path)
    ic = c.introducer_clients[0]
    # peek at the introducer client's internal announcement state
    outbound = ic._outbound_announcements
    published = ic._published_announcements
    def read_seqnum():
        # the seqnum is persisted to basedir/announcement-seqnum
        seqnum = basedir.child("announcement-seqnum").getContent()
        return int(seqnum)
    ic.publish("sA", {"key": "value1"}, c._node_private_key)
    self.failUnlessEqual(read_seqnum(), 1)
    self.failUnless("sA" in outbound)
    self.failUnlessEqual(outbound["sA"]["seqnum"], 1)
    nonce1 = outbound["sA"]["nonce"]
    self.failUnless(isinstance(nonce1, bytes))
    # Make nonce unicode, to match JSON:
    outbound["sA"]["nonce"] = str(nonce1, "utf-8")
    self.failUnlessEqual(json.loads(published["sA"][0]),
                         outbound["sA"])
    # [1] is the signature, [2] is the pubkey
    # publishing a second service causes both services to be
    # re-published, with the next higher sequence number
    ic.publish("sB", {"key": "value2"}, c._node_private_key)
    self.failUnlessEqual(read_seqnum(), 2)
    self.failUnless("sB" in outbound)
    self.failUnlessEqual(outbound["sB"]["seqnum"], 2)
    self.failUnless("sA" in outbound)
    self.failUnlessEqual(outbound["sA"]["seqnum"], 2)
    nonce2 = outbound["sA"]["nonce"]
    self.failUnless(isinstance(nonce2, bytes))
    # re-publishing must generate a fresh nonce
    self.failIfEqual(nonce1, nonce2)
    # Make nonce unicode, to match JSON:
    outbound["sA"]["nonce"] = str(nonce2, "utf-8")
    outbound["sB"]["nonce"] = str(outbound["sB"]["nonce"], "utf-8")
    self.failUnlessEqual(json.loads(published["sA"][0]),
                         outbound["sA"])
    self.failUnlessEqual(json.loads(published["sB"][0]),
                         outbound["sB"])
def test_web_apiauthtoken(self):
    """
    Client loads the proper API auth token from disk
    """
    basedir = u"client.Basic.test_web_apiauthtoken"
    create_node_dir(basedir, "testing")
    c = yield client.create_client(basedir)
    # this must come after we create the client, as it will create
    # a new, random authtoken itself
    token_path = os.path.join(basedir, "private", "api_auth_token")
    with open(token_path, "w") as f:
        f.write("deadbeef")
    self.assertEqual("deadbeef", c.get_auth_token())
def test_web_apiauthtoken(self):
    """
    Client loads the proper API auth token from disk
    """
    basedir = u"client.Basic.test_web_apiauthtoken"
    create_node_dir(basedir, "testing")
    c = yield client.create_client(basedir)
    # overwrite the token only after client creation: creating the
    # client generates a fresh random token of its own
    with open(os.path.join(basedir, "private", "api_auth_token"), "w") as f:
        f.write("deadbeef")
    token = c.get_auth_token()
    self.assertEqual("deadbeef", token)
def test_logdir_is_str(self):
    """
    foolscap's setLogDir must receive a native `str` logdir.
    """
    basedir = "test_node/test_logdir_is_str"
    # mutable flag shared with the patched-in stub below
    ns = Namespace()
    ns.called = False
    def call_setLogDir(logdir):
        # stub for foolscap.logging.log.setLogDir: record + type-check
        ns.called = True
        self.failUnless(isinstance(logdir, str), logdir)
    self.patch(foolscap.logging.log, 'setLogDir', call_setLogDir)
    create_node_dir(basedir, "nothing to see here")
    yield client.create_client(basedir)
    # client creation must have configured the foolscap log directory
    self.failUnless(ns.called)
def test_ftp_create(self):
    """
    configuration for sftpd results in it being started
    """
    basedir = u"client.Basic.test_ftp_create"
    create_node_dir(basedir, "testing")
    sftpd_config = ('[sftpd]\n'
                    'enabled = true\n'
                    'accounts.file = foo\n'
                    'host_pubkey_file = pubkey\n'
                    'host_privkey_file = privkey\n')
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
        f.write(sftpd_config)
    with mock.patch('allmydata.frontends.sftpd.SFTPServer') as p:
        yield client.create_client(basedir)
    self.assertTrue(p.called)
def test_secrets(self):
    """
    A new client has renewal + cancel secrets
    """
    basedir = "test_client.Basic.test_secrets"
    os.mkdir(basedir)
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
    c = yield client.create_client(basedir)
    secret_fname = os.path.join(basedir, "private", "secret")
    self.failUnless(os.path.exists(secret_fname), secret_fname)
    for secret in (c.get_renewal_secret(), c.get_cancel_secret()):
        self.failUnless(base32.b2a(secret))
def test_web_staticdir(self):
    """
    a relative web.static dir is expanded properly
    """
    basedir = u"client.Basic.test_web_staticdir"
    os.mkdir(basedir)
    node_section = ("[node]\n" +
                    "web.port = tcp:0:interface=127.0.0.1\n" +
                    "web.static = relative\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                   BASECONFIG + node_section)
    c = yield client.create_client(basedir)
    w = c.getServiceNamed("webish")
    abs_basedir = fileutil.abspath_expanduser_unicode(basedir)
    expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir)
    self.failUnlessReallyEqual(w.staticdir, expected)
def test_unreadable_introducers(self):
    """
    The Deferred from create_client fails when
    private/introducers.yaml is unreadable (but exists)
    """
    # NOTE(review): chmod(0o000) does not block reads when running as
    # root (and may behave differently on Windows) -- confirm the suite
    # skips/handles those platforms.
    basedir = "test_client.Basic.test_unreadable_introduers"
    os.mkdir(basedir, 0o700)
    os.mkdir(os.path.join(basedir, 'private'), 0o700)
    intro_fname = os.path.join(basedir, 'private', 'introducers.yaml')
    with open(intro_fname, 'w') as f:
        f.write("---\n")
    # make the file unreadable, and restore permissions afterwards so
    # the trial cleanup can delete the directory
    os.chmod(intro_fname, 0o000)
    self.addCleanup(lambda: os.chmod(intro_fname, 0o700))
    with self.assertRaises(EnvironmentError):
        yield client.create_client(basedir)
def test_introducer_count(self):
    """ Ensure that the Client creates same number of introducer clients
    as found in "basedir/private/introducers" config file. """
    connections = {'introducers':
        {
        u'intro1':{ 'furl': 'furl1' },
        u'intro2':{ 'furl': 'furl4' }
        },
    }
    self.yaml_path.setContent(yamlutil.safe_dump(connections))
    # get a client and count of introducer_clients
    myclient = create_client(self.basedir)
    ic_count = len(myclient.introducer_clients)
    # assertions
    # NOTE(review): 2 introducers are written to introducers.yaml but 3
    # clients are expected -- presumably the third comes from an
    # introducer.furl already present in this fixture's tahoe.cfg;
    # confirm against the test's setUp.
    self.failUnlessEqual(ic_count, 3)
def test_ftp_auth_keyfile(self):
    """
    ftpd accounts.file is parsed properly
    """
    basedir = u"client.Basic.test_ftp_auth_keyfile"
    os.mkdir(basedir)
    ftpd_section = ("[ftpd]\n"
                    "enabled = true\n"
                    "port = tcp:0:interface=127.0.0.1\n"
                    "accounts.file = private/accounts\n")
    fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                   (BASECONFIG + ftpd_section))
    os.mkdir(os.path.join(basedir, "private"))
    fileutil.write(os.path.join(basedir, "private", "accounts"), "\n")
    c = yield client.create_client(basedir)
    # just make sure it can be instantiated
    del c
def test_unreadable_introducers(self):
    """
    The Deferred from create_client fails when
    private/introducers.yaml is unreadable (but exists)
    """
    # NOTE(review): chmod(0o000) does not prevent reads for root (and
    # Windows semantics differ) -- confirm platform handling elsewhere.
    basedir = "test_client.Basic.test_unreadable_introduers"
    os.mkdir(basedir, 0o700)
    os.mkdir(os.path.join(basedir, 'private'), 0o700)
    intro_fname = os.path.join(basedir, 'private', 'introducers.yaml')
    with open(intro_fname, 'w') as f:
        f.write("---\n")
    # strip all permissions; restore them afterwards so cleanup can
    # remove the file
    os.chmod(intro_fname, 0o000)
    self.addCleanup(lambda: os.chmod(intro_fname, 0o700))
    with self.assertRaises(EnvironmentError):
        yield client.create_client(basedir)
def test_reject_default_in_yaml(self):
    """The reserved name 'default' may not appear in introducers.yaml."""
    connections = {'introducers': {u'default': {'furl': 'furl1'}}}
    self.yaml_path.setContent(yamlutil.safe_dump(connections))
    with self.assertRaises(ValueError) as ctx:
        yield create_client(self.basedir)
    expected_message = (
        "'default' introducer furl cannot be specified in introducers.yaml; please "
        "fix impossible configuration."
    )
    self.assertEquals(str(ctx.exception), expected_message)
def test_ftp_create(self):
    """
    configuration for sftpd results in it being started
    """
    basedir = u"client.Basic.test_ftp_create"
    create_node_dir(basedir, "testing")
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
        f.write('[sftpd]\n'
                'enabled = true\n'
                'accounts.file = foo\n'
                'host_pubkey_file = pubkey\n'
                'host_privkey_file = privkey\n')
    # patch out the real SFTP server so we only observe the start-up call
    with mock.patch('allmydata.frontends.sftpd.SFTPServer') as p:
        yield client.create_client(basedir)
    self.assertTrue(p.called)
def _storage_dir_test(self, basedir, storage_path, expected_path):
    """Write a storage config (optionally with storage_dir) and check storedir."""
    os.mkdir(basedir)
    cfg_path = os.path.join(basedir, "tahoe.cfg")
    fileutil.write(cfg_path, BASECONFIG + "[storage]\n" "enabled = true\n")
    if storage_path is not None:
        # append the storage_dir line to the existing config
        fileutil.write(cfg_path, "storage_dir = %s\n" % (storage_path,),
                       mode="ab")
    c = client.create_client(basedir)
    self.assertEqual(c.getServiceNamed("storage").storedir, expected_path)
def test_read_introducer_furl_from_tahoecfg(self):
    """
    Ensure that the Client reads the introducer.furl config item from
    the tahoe.cfg file.
    """
    fake_furl = "furl1"
    # create a custom tahoe.cfg
    with open(os.path.join(self.basedir, "tahoe.cfg"), "w") as f:
        config = {'hide-ip':False, 'listen': 'tcp',
                  'port': None, 'location': None, 'hostname': 'example.net'}
        write_node_config(f, config)
        f.write("[client]\n")
        f.write("introducer.furl = %s\n" % fake_furl)
        f.write("[storage]\n")
        f.write("enabled = false\n")
    # get a client and first introducer_furl
    myclient = create_client(self.basedir)
    self.failUnlessEqual(fake_furl, myclient.introducer_furls[0])
def test_versions(self):
    """
    The storage server's remote_get_version advertises allmydata's full
    version string, and client stats include a float node.uptime.
    """
    basedir = "test_client.Basic.test_versions"
    os.mkdir(basedir)
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                   BASECONFIG + \
                   "[storage]\n" + \
                   "enabled = true\n")
    c = client.create_client(basedir)
    ss = c.getServiceNamed("storage")
    verdict = ss.remote_get_version()
    self.failUnlessReallyEqual(verdict["application-version"],
                               str(allmydata.__full_version__))
    # the packaging machinery must have resolved a real version
    self.failIfEqual(str(allmydata.__version__), "unknown")
    self.failUnless("." in str(allmydata.__full_version__),
                    "non-numeric version in '%s'" % allmydata.__version__)
    all_versions = allmydata.get_package_versions_string()
    self.failUnless(allmydata.__appname__ in all_versions)
    # also test stats
    stats = c.get_stats()
    self.failUnless("node.uptime" in stats)
    self.failUnless(isinstance(stats["node.uptime"], float))
def test_introducer_count(self):
    """
    If there are two introducers configured in ``introducers.yaml`` then
    ``Client`` creates two introducer clients.
    """
    connections = {
        'introducers': {
            u'intro1': {'furl': 'furl1'},
            u'intro2': {'furl': 'furl4'},
        },
    }
    self.yaml_path.setContent(yamlutil.safe_dump(connections))
    myclient = yield create_client(self.basedir)
    # one introducer client per configured introducer
    self.failUnlessEqual(len(myclient.introducer_clients),
                         len(connections["introducers"]))
def _storage_dir_test(self, basedir, storage_path, expected_path):
    """
    generic helper for following storage_dir tests
    """
    os.mkdir(basedir)
    cfg_path = os.path.join(basedir, "tahoe.cfg")
    fileutil.write(cfg_path, BASECONFIG + "[storage]\n" "enabled = true\n")
    if storage_path is not None:
        # append a storage_dir line to the just-written config
        extra = "storage_dir = %s\n" % (storage_path, )
        fileutil.write(cfg_path, extra, mode="ab")
    c = yield client.create_client(basedir)
    self.assertEqual(c.getServiceNamed("storage").storedir, expected_path)
def test_invalid_static_server(self, logger): """ An invalid announcement for a static server does not prevent other static servers from being loaded. """ # Some good details serverid = u"v1-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" announcement = { u"nickname": u"some-storage-server", u"anonymous-storage-FURL": u"pb://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@tcp:storage.example:100/swissnum", } self.useFixture( StaticServers( self.basedir, [ (serverid, announcement), # Along with a "bad" server announcement. Order in this list # doesn't matter, yaml serializer and Python dicts are going # to shuffle everything around kind of randomly. (u"v0-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", { u"nickname": u"another-storage-server", u"anonymous-storage-FURL": None, }), ], ), ) self.assertThat( client.create_client(self.basedir.asTextMode().path), succeeded( AfterPreprocessing( get_known_server_details, # It should have the good server details. Equals([(serverid, announcement)]), ), ), )
def test_loadable(self):
    """A minimal BASECONFIG basedir produces a creatable client."""
    basedir = "test_client.Basic.test_loadable"
    os.mkdir(basedir)
    cfg_path = os.path.join(basedir, "tahoe.cfg")
    fileutil.write(cfg_path, BASECONFIG)
    return client.create_client(basedir)
def test_client_cache(self):
    """
    Announcements received by an introducer client are written to that
    introducer client's cache file.
    """
    basedir = FilePath("introducer/ClientSeqnums/test_client_cache_1")
    private = basedir.child("private")
    private.makedirs()
    write_introducer(basedir, "default", "nope")
    cache_filepath = basedir.descendant([
        "private",
        "introducer_default_cache.yaml",
    ])
    # if storage is enabled, the Client will publish its storage server
    # during startup (although the announcement will wait in a queue
    # until the introducer connection is established). To avoid getting
    # confused by this, disable storage.
    with basedir.child("tahoe.cfg").open("w") as f:
        f.write(b"[storage]\n")
        f.write(b"enabled = false\n")
    c = yield create_client(basedir.path)
    ic = c.introducer_clients[0]
    private_key, public_key = ed25519.create_signing_keypair()
    public_key_str = remove_prefix(
        ed25519.string_from_verifying_key(public_key), b"pub-")
    furl1 = b"pb://[email protected]:123/short" # base32("short")
    # deliver a first announcement and let the eventual-send queue drain
    ann_t = make_ann_t(ic, furl1, private_key, 1)
    ic.got_announcements([ann_t])
    yield flushEventualQueue()
    # check the cache for the announcement
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(ensure_binary(announcements[0]['key_s']),
                         public_key_str)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ensure_binary(ann["anonymous-storage-FURL"]),
                         furl1)
    self.failUnlessEqual(ann["seqnum"], 1)
    # a new announcement that replaces the first should replace the
    # cached entry, not duplicate it
    furl2 = furl1 + b"er"
    ann_t2 = make_ann_t(ic, furl2, private_key, 2)
    ic.got_announcements([ann_t2])
    yield flushEventualQueue()
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(ensure_binary(announcements[0]['key_s']),
                         public_key_str)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ensure_binary(ann["anonymous-storage-FURL"]),
                         furl2)
    self.failUnlessEqual(ann["seqnum"], 2)
    # but a third announcement with a different key should add to the
    # cache
    private_key2, public_key2 = ed25519.create_signing_keypair()
    public_key_str2 = remove_prefix(
        ed25519.string_from_verifying_key(public_key2), b"pub-")
    furl3 = b"pb://[email protected]:456/short"
    ann_t3 = make_ann_t(ic, furl3, private_key2, 1)
    ic.got_announcements([ann_t3])
    yield flushEventualQueue()
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 2)
    self.failUnlessEqual(
        set([public_key_str, public_key_str2]),
        set([ensure_binary(a["key_s"]) for a in announcements]))
    self.failUnlessEqual(
        set([furl2, furl3]),
        set([
            ensure_binary(a["ann"]["anonymous-storage-FURL"])
            for a in announcements
        ]))
    # test loading
    yield flushEventualQueue()
    # a second IntroducerClient pointed at the same cache file should
    # recover both announcements from disk
    ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                           "my_version", "oldest_version", fakeseq,
                           ic._cache_filepath)
    announcements = {}
    def got(key_s, ann):
        announcements[key_s] = ann
    ic2.subscribe_to("storage", got)
    ic2._load_announcements() # normally happens when connection fails
    yield flushEventualQueue()
    self.failUnless(public_key_str in announcements)
    self.failUnlessEqual(
        ensure_binary(
            announcements[public_key_str]["anonymous-storage-FURL"]),
        furl2)
    self.failUnlessEqual(
        ensure_binary(
            announcements[public_key_str2]["anonymous-storage-FURL"]),
        furl3)
    # and a fresh client loading the cache should populate its broker
    c2 = yield create_client(basedir.path)
    c2.introducer_clients[0]._load_announcements()
    yield flushEventualQueue()
    self.assertEqual(c2.storage_broker.get_all_serverids(),
                     frozenset([public_key_str, public_key_str2]))
def test_introducerless(self):
    """An empty introducers map yields zero introducer furls."""
    connections = {'introducers': {} }
    self.yaml_path.setContent(yamlutil.safe_dump(connections))
    myclient = create_client(self.basedir)
    self.assertEquals(len(myclient.introducer_furls), 0)
def test_real_yaml(self):
    """A literal introducers.yaml fixture is parsed into introducer_furls."""
    self.yaml_path.setContent(SIMPLE_YAML)
    myclient = create_client(self.basedir)
    self.assertEquals(myclient.introducer_furls[0], 'furl1')
def test_nodekey_yes_storage(self):
    """With the default (storage-enabled) config the client has a v0- key."""
    basedir = "test_client.Basic.test_nodekey_yes_storage"
    os.mkdir(basedir)
    cfg_path = os.path.join(basedir, "tahoe.cfg")
    fileutil.write(cfg_path, BASECONFIG)
    c = client.create_client(basedir)
    nodeid = c.get_long_nodeid()
    self.failUnless(nodeid.startswith("v0-"))
def test_maker(self):
    """
    Client.create_node_from_uri (the NodeMaker) returns the right kind
    of node -- with the right interfaces and readonly/mutable flags --
    for each cap type, handles unknown future caps, and does NOT cache
    CHK filenodes (regression coverage for ticket #1679).
    """
    basedir = "client/NodeMaker/maker"
    fileutil.make_dirs(basedir)
    fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
    c = yield client.create_client(basedir)

    # immutable CHK cap -> read-only, immutable file node
    n = c.create_node_from_uri(
        "URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277"
    )
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failUnless(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failIf(n.is_mutable())

    # Testing #1679. There was a bug that would occur when downloader was
    # downloading the same readcap more than once concurrently, so the
    # filenode object was cached, and there was a failure from one of the
    # servers in one of the download attempts. No subsequent download
    # attempt would attempt to use that server again, which would lead to
    # the file being undownloadable until the gateway was restarted. The
    # current fix for this (hopefully to be superceded by a better fix
    # eventually) is to prevent re-use of filenodes, so the NodeMaker is
    # hereby required *not* to cache and re-use filenodes for CHKs.
    other_n = c.create_node_from_uri(
        "URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277"
    )
    self.failIf(n is other_n, (n, other_n))

    # LIT cap -> also an immutable, read-only file node
    n = c.create_node_from_uri("URI:LIT:n5xgk")
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failUnless(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failIf(n.is_mutable())

    # SSK write cap -> mutable, writable file node
    n = c.create_node_from_uri(
        "URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq"
    )
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failUnless(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failIf(n.is_readonly())
    self.failUnless(n.is_mutable())

    # SSK read-only cap -> mutable underneath, but read-only to us
    n = c.create_node_from_uri(
        "URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq"
    )
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failUnless(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failUnless(n.is_mutable())

    # DIR2 write cap -> writable mutable directory node
    n = c.create_node_from_uri(
        "URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq"
    )
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failUnless(IDirectoryNode.providedBy(n))
    self.failIf(n.is_readonly())
    self.failUnless(n.is_mutable())

    # DIR2 read-only cap -> read-only (but still mutable-class) directory
    n = c.create_node_from_uri(
        "URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq"
    )
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failUnless(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failUnless(n.is_mutable())

    # unrecognized cap scheme -> generic "unknown" node preserving both
    # the write- and read-URIs (readonly URI gets an "ro." prefix)
    unknown_rw = "lafs://from_the_future"
    unknown_ro = "lafs://readonly_from_the_future"
    n = c.create_node_from_uri(unknown_rw, unknown_ro)
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_unknown())
    self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)

    # Note: it isn't that we *intend* to deploy non-ASCII caps in
    # the future, it is that we want to make sure older Tahoe-LAFS
    # versions wouldn't choke on them if we were to do so. See
    # #1051 and wiki:NewCapDesign for details.
    unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
    unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode(
        'utf-8')
    n = c.create_node_from_uri(unknown_rw, unknown_ro)
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_unknown())
    self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)
def _stopped(res):
    # After shutdown completes, re-create the client for slot ``num``
    # from its original basedir, record it, and re-attach it to the
    # parent service so it starts running again.
    # NOTE(review): the ``yield`` makes this a generator -- presumably it
    # is wrapped by @defer.inlineCallbacks (decorator not visible in this
    # excerpt); confirm, otherwise calling it as a plain Deferred
    # callback would never execute the body.
    new_c = yield client.create_client(self.getdir("client%d" % num))
    self.clients[num] = new_c
    new_c.setServiceParent(self.sparent)
def test_client_cache(self):
    """
    The IntroducerClient persists announcements to its YAML cache file:
    a repeat announcement from the same key replaces (not duplicates)
    the cached entry, a different key adds a second entry, and a fresh
    client can reload subscribers' announcements from the cache.
    """
    basedir = "introducer/ClientSeqnums/test_client_cache_1"
    fileutil.make_dirs(basedir)
    cache_filepath = FilePath(
        os.path.join(basedir, "private", "introducer_default_cache.yaml"))
    # if storage is enabled, the Client will publish its storage server
    # during startup (although the announcement will wait in a queue
    # until the introducer connection is established). To avoid getting
    # confused by this, disable storage.
    f = open(os.path.join(basedir, "tahoe.cfg"), "w")
    f.write("[client]\n")
    f.write("introducer.furl = nope\n")
    f.write("[storage]\n")
    f.write("enabled = false\n")
    f.close()

    c = create_client(basedir)
    ic = c.introducer_clients[0]
    # make a signing keypair; pub1 is the bare (un-prefixed) verify key
    sk_s, vk_s = keyutil.make_keypair()
    sk, _ignored = keyutil.parse_privkey(sk_s)
    pub1 = keyutil.remove_prefix(vk_s, "pub-")
    furl1 = "pb://[email protected]:123/short" # base32("short")
    ann_t = make_ann_t(ic, furl1, sk, 1)
    ic.got_announcements([ann_t])
    yield flushEventualQueue()

    # check the cache for the announcement
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(announcements[0]['key_s'], pub1)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1)
    self.failUnlessEqual(ann["seqnum"], 1)

    # a new announcement that replaces the first should replace the
    # cached entry, not duplicate it
    furl2 = furl1 + "er"
    ann_t2 = make_ann_t(ic, furl2, sk, 2)
    ic.got_announcements([ann_t2])
    yield flushEventualQueue()
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 1)
    self.failUnlessEqual(announcements[0]['key_s'], pub1)
    ann = announcements[0]["ann"]
    self.failUnlessEqual(ann["anonymous-storage-FURL"], furl2)
    self.failUnlessEqual(ann["seqnum"], 2)

    # but a third announcement with a different key should add to the
    # cache
    sk_s2, vk_s2 = keyutil.make_keypair()
    sk2, _ignored = keyutil.parse_privkey(sk_s2)
    pub2 = keyutil.remove_prefix(vk_s2, "pub-")
    furl3 = "pb://[email protected]:456/short"
    ann_t3 = make_ann_t(ic, furl3, sk2, 1)
    ic.got_announcements([ann_t3])
    yield flushEventualQueue()
    announcements = self._load_cache(cache_filepath)
    self.failUnlessEqual(len(announcements), 2)
    self.failUnlessEqual(set([pub1, pub2]),
                         set([a["key_s"] for a in announcements]))
    self.failUnlessEqual(
        set([furl2, furl3]),
        set([a["ann"]["anonymous-storage-FURL"] for a in announcements]))

    # test loading
    yield flushEventualQueue()
    # a second IntroducerClient, sharing the first one's cache file,
    # should deliver the cached announcements to a new subscriber
    ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                           "my_version", "oldest_version", {}, fakeseq,
                           ic._cache_filepath)
    announcements = {}

    def got(key_s, ann):
        announcements[key_s] = ann
    ic2.subscribe_to("storage", got)
    ic2._load_announcements() # normally happens when connection fails
    yield flushEventualQueue()
    self.failUnless(pub1 in announcements)
    self.failUnlessEqual(announcements[pub1]["anonymous-storage-FURL"],
                         furl2)
    self.failUnlessEqual(announcements[pub2]["anonymous-storage-FURL"],
                         furl3)

    # a fresh client over the same basedir should populate its storage
    # broker from the cached announcements
    c2 = create_client(basedir)
    c2.introducer_clients[0]._load_announcements()
    yield flushEventualQueue()
    self.assertEqual(c2.storage_broker.get_all_serverids(),
                     frozenset([pub1, pub2]))
def test_create_magic_folder_service(self):
    """
    providing magic-folder options actually creates a MagicFolder
    service

    Also covers: missing ``local.directory`` raising IOError, the
    legacy ``[drop_upload]`` section raising OldConfigOptionError, and
    MagicFolder construction errors propagating out of create_client.
    """
    boom = False

    class Boom(Exception):
        pass

    # Stand-in for the real MagicFolder: records its constructor
    # arguments and tracks running state, but performs no I/O.
    class MockMagicFolder(allmydata.frontends.magic_folder.MagicFolder):
        name = 'magic-folder'

        def __init__(self, client, upload_dircap, collective_dircap,
                     local_path_u, dbfile, umask, name, inotify=None,
                     uploader_delay=1.0, clock=None, downloader_delay=3):
            # ``boom`` is read from the enclosing test function at call
            # time, so flipping it later changes behavior of new mocks
            if boom:
                raise Boom()
            service.MultiService.__init__(self)
            self.client = client
            self._umask = umask
            self.upload_dircap = upload_dircap
            self.collective_dircap = collective_dircap
            self.local_dir = local_path_u
            self.dbfile = dbfile
            self.inotify = inotify

        def startService(self):
            self.running = True

        def stopService(self):
            self.running = False

        def ready(self):
            pass

    self.patch(allmydata.frontends.magic_folder, 'MagicFolder',
               MockMagicFolder)

    upload_dircap = "URI:DIR2:blah"
    local_dir_u = self.unicode_or_fallback(u"loc\u0101l_dir",
                                           u"local_dir")
    local_dir_utf8 = local_dir_u.encode('utf-8')
    config = (BASECONFIG + "[storage]\n" + "enabled = false\n" +
              "[magic_folder]\n" + "enabled = true\n")

    basedir1 = "test_client.Basic.test_create_magic_folder_service1"
    os.mkdir(basedir1)
    os.mkdir(local_dir_u)

    # which config-entry should be missing?
    fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                   config + "local.directory = " + local_dir_utf8 + "\n")
    with self.assertRaises(IOError):
        yield client.create_client(basedir1)

    # local.directory entry missing .. but that won't be an error
    # now, it'll just assume there are not magic folders
    # .. hrm...should we make that an error (if enabled=true but
    # there's not yaml AND no local.directory?)
    fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config)
    fileutil.write(
        os.path.join(basedir1, "private", "magic_folder_dircap"),
        "URI:DIR2:blah")
    fileutil.write(os.path.join(basedir1, "private", "collective_dircap"),
                   "URI:DIR2:meow")

    # legacy [drop_upload] section must be rejected
    fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                   config.replace("[magic_folder]\n", "[drop_upload]\n"))
    with self.assertRaises(OldConfigOptionError):
        yield client.create_client(basedir1)

    # fully-configured magic folder: the service is created and wired up
    fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                   config + "local.directory = " + local_dir_utf8 + "\n")
    c1 = yield client.create_client(basedir1)
    magicfolder = c1.getServiceNamed('magic-folder')
    self.failUnless(isinstance(magicfolder, MockMagicFolder), magicfolder)
    self.failUnlessReallyEqual(magicfolder.client, c1)
    self.failUnlessReallyEqual(magicfolder.upload_dircap, upload_dircap)
    self.failUnlessReallyEqual(os.path.basename(magicfolder.local_dir),
                               local_dir_u)
    self.failUnless(magicfolder.inotify is None, magicfolder.inotify)
    # It doesn't start until the client starts.
    self.assertFalse(magicfolder.running)

    # See above.
    boom = True

    basedir2 = "test_client.Basic.test_create_magic_folder_service2"
    os.mkdir(basedir2)
    os.mkdir(os.path.join(basedir2, "private"))
    fileutil.write(
        os.path.join(basedir2, "tahoe.cfg"),
        BASECONFIG + "[magic_folder]\n" + "enabled = true\n" +
        "local.directory = " + local_dir_utf8 + "\n")
    fileutil.write(
        os.path.join(basedir2, "private", "magic_folder_dircap"),
        "URI:DIR2:blah")
    fileutil.write(os.path.join(basedir2, "private", "collective_dircap"),
                   "URI:DIR2:meow")
    # MagicFolder.__init__ raising (Boom) must propagate from
    # create_client
    with self.assertRaises(Boom):
        yield client.create_client(basedir2)