def test_paced_service(self):
    self.basedir = "crawler/Basic/paced_service"
    fileutil.make_dirs(self.basedir)
    serverid = "\x00" * 20
    ss = StorageServer(self.basedir, serverid)
    ss.setServiceParent(self.s)

    sis = [self.write(i, ss, serverid) for i in range(10)]
    statefile = os.path.join(self.basedir, "statefile")

    c = PacedCrawler(ss, statefile)

    did_check_progress = [False]
    def check_progress():
        c.yield_cb = None
        try:
            p = c.get_progress()
            self.failUnlessEqual(p["cycle-in-progress"], True)
            pct = p["cycle-complete-percentage"]
            # after 6 buckets, we happen to be at 76.17% complete. As
            # long as we create shares in deterministic order, this will
            # continue to be true.
            self.failUnlessEqual(int(pct), 76)
            left = p["remaining-sleep-time"]
            self.failUnless(isinstance(left, float), left)
            self.failUnless(left > 0.0, left)
        except Exception, e:
            did_check_progress[0] = e
        else:
            did_check_progress[0] = True
def test_empty_subclass(self):
    self.basedir = "crawler/Basic/empty_subclass"
    fileutil.make_dirs(self.basedir)
    serverid = "\x00" * 20
    ss = StorageServer(self.basedir, serverid)
    ss.setServiceParent(self.s)

    for i in range(10):
        self.write(i, ss, serverid)
    statefile = os.path.join(self.basedir, "statefile")

    c = ShareCrawler(ss, statefile)
    c.slow_start = 0
    c.setServiceParent(self.s)

    # we just let it run for a while, to get figleaf coverage of the
    # empty methods in the base class
    def _check():
        return bool(c.state["last-cycle-finished"] is not None)
    d = self.poll(_check)
    def _done(ignored):
        state = c.get_state()
        self.failUnless(state["last-cycle-finished"] is not None)
    d.addCallback(_done)
    return d
def test_service(self):
    self.basedir = "crawler/Basic/service"
    fileutil.make_dirs(self.basedir)
    serverid = "\x00" * 20
    ss = StorageServer(self.basedir, serverid)
    ss.setServiceParent(self.s)

    sis = [self.write(i, ss, serverid) for i in range(10)]
    statefile = os.path.join(self.basedir, "statefile")

    c = BucketEnumeratingCrawler(ss, statefile)
    c.setServiceParent(self.s)

    # it should be legal to call get_state() and get_progress() right
    # away, even before the first tick is performed. No work should have
    # been done yet.
    s = c.get_state()
    p = c.get_progress()
    self.failUnlessEqual(s["last-complete-prefix"], None)
    self.failUnlessEqual(s["current-cycle"], None)
    self.failUnlessEqual(p["cycle-in-progress"], False)

    d = c.finished_d
    def _check(ignored):
        self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))
    d.addCallback(_check)
    return d
def test_immediate(self):
    self.basedir = "crawler/Basic/immediate"
    fileutil.make_dirs(self.basedir)
    serverid = "\x00" * 20
    ss = StorageServer(self.basedir, serverid)
    ss.setServiceParent(self.s)

    sis = [self.write(i, ss, serverid) for i in range(10)]
    statefile = os.path.join(self.basedir, "statefile")

    c = BucketEnumeratingCrawler(ss, statefile, allowed_cpu_percentage=.1)
    c.load_state()

    c.start_current_prefix(time.time())
    self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))

    # make sure the statefile has been returned to the starting point
    c.finished_d = defer.Deferred()
    c.all_buckets = []
    c.start_current_prefix(time.time())
    self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))

    # check that a new crawler picks up on the state file properly
    c2 = BucketEnumeratingCrawler(ss, statefile)
    c2.load_state()

    c2.start_current_prefix(time.time())
    self.failUnlessEqual(sorted(sis), sorted(c2.all_buckets))
def test_private_config(self):
    basedir = u"test_node/test_private_config"
    privdir = os.path.join(basedir, "private")
    fileutil.make_dirs(privdir)
    f = open(os.path.join(privdir, 'already'), 'wt')
    f.write("secret")
    f.close()

    basedir = fileutil.abspath_expanduser_unicode(basedir)
    config = config_from_string(basedir, "", "")

    self.assertEqual(config.get_private_config("already"), "secret")
    self.assertEqual(config.get_private_config("not", "default"), "default")
    self.assertRaises(MissingConfigEntry, config.get_private_config, "not")

    value = config.get_or_create_private_config("new", "start")
    self.assertEqual(value, "start")
    self.assertEqual(config.get_private_config("new"), "start")

    counter = []
    def make_newer():
        counter.append("called")
        return "newer"
    value = config.get_or_create_private_config("newer", make_newer)
    self.assertEqual(len(counter), 1)
    self.assertEqual(value, "newer")
    self.assertEqual(config.get_private_config("newer"), "newer")

    value = config.get_or_create_private_config("newer", make_newer)
    self.assertEqual(len(counter), 1) # don't call unless necessary
    self.assertEqual(value, "newer")
def test_oneshot(self):
    self.basedir = "crawler/Basic/oneshot"
    fileutil.make_dirs(self.basedir)
    serverid = "\x00" * 20
    ss = StorageServer(self.basedir, serverid)
    ss.setServiceParent(self.s)

    for i in range(30):
        self.write(i, ss, serverid)
    statefile = os.path.join(self.basedir, "statefile")

    c = OneShotCrawler(ss, statefile)
    c.setServiceParent(self.s)

    d = c.finished_d
    def _finished_first_cycle(ignored):
        return fireEventually(c.counter)
    d.addCallback(_finished_first_cycle)
    def _check(old_counter):
        # the crawler should not have done any more work after it was
        # stopped
        self.failUnlessEqual(old_counter, c.counter)
        self.failIf(c.running)
        self.failIf(c.timer)
        self.failIf(c.current_sleep_time)
        s = c.get_state()
        self.failUnlessEqual(s["last-cycle-finished"], 0)
        self.failUnlessEqual(s["current-cycle"], None)
    d.addCallback(_check)
    return d
def start(config, out=sys.stdout, err=sys.stderr):
    basedir = config['basedir']
    quoted_basedir = quote_local_unicode_path(basedir)
    print >>out, "STARTING", quoted_basedir
    if not os.path.isdir(basedir):
        print >>err, "%s does not look like a directory at all" % quoted_basedir
        return 1
    nodetype = identify_node_type(basedir)
    if not nodetype:
        print >>err, "%s is not a recognizable node directory" % quoted_basedir
        return 1

    # Now prepare to turn into a twistd process. This os.chdir is the point
    # of no return.
    os.chdir(basedir)
    twistd_args = []
    if (nodetype in ("client", "introducer")
        and "--nodaemon" not in config.twistd_args
        and "--syslog" not in config.twistd_args
        and "--logfile" not in config.twistd_args):
        fileutil.make_dirs(os.path.join(basedir, u"logs"))
        twistd_args.extend(["--logfile", os.path.join("logs", "twistd.log")])
    twistd_args.extend(config.twistd_args)
    twistd_args.append("StartTahoeNode") # point at our StartTahoeNodePlugin

    twistd_config = MyTwistdConfig()
    try:
        twistd_config.parseOptions(twistd_args)
    except usage.error, ue:
        # these arguments were unsuitable for 'twistd'
        print >>err, config
        print >>err, "tahoe %s: usage error from twistd: %s\n" % (config.subcommand_name, ue)
        return 1
def test_baddir(self):
    self.skip_if_cannot_daemonize()
    basedir = self.workdir("test_baddir")
    fileutil.make_dirs(basedir)

    d = self.run_bintahoe(["--quiet", "start", "--basedir", basedir])
    def _cb(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 1)
        self.failUnless("is not a recognizable node directory" in err, err)
    d.addCallback(_cb)

    def _then_stop_it(res):
        return self.run_bintahoe(["--quiet", "stop", "--basedir", basedir])
    d.addCallback(_then_stop_it)
    def _cb2(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 2)
        self.failUnless("does not look like a running node directory" in err)
    d.addCallback(_cb2)

    def _then_start_in_bogus_basedir(res):
        not_a_dir = os.path.join(basedir, "bogus")
        return self.run_bintahoe(["--quiet", "start", "--basedir", not_a_dir])
    d.addCallback(_then_start_in_bogus_basedir)
    def _cb3(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 1)
        self.failUnlessIn("does not look like a directory at all", err)
    d.addCallback(_cb3)
    return d
def setUpHelper(self, basedir, helper_class=Helper_fake_upload):
    fileutil.make_dirs(basedir)
    self.helper = h = helper_class(basedir,
                                   self.s.storage_broker,
                                   self.s.secret_holder,
                                   None, None)
    self.helper_furl = self.tub.registerReference(h)
def test_fail(self):
    self.basedir = basedir = os.path.join("backupdb", "fail")
    fileutil.make_dirs(basedir)

    # put a non-DB file in the way
    not_a_db = ("I do not look like a sqlite database\n" +
                "I'M NOT" * 1000) # OS-X sqlite-2.3.2 takes some convincing
    self.writeto("not-a-database", not_a_db)
    stderr_f = StringIO()
    bdb = backupdb.get_backupdb(os.path.join(basedir, "not-a-database"),
                                stderr_f)
    self.failUnlessEqual(bdb, None)
    stderr = stderr_f.getvalue()
    self.failUnlessIn("backupdb file is unusable", stderr)
    self.failUnlessIn("file is encrypted or is not a database", stderr)

    # put a directory in the way, to exercise a different error path
    where = os.path.join(basedir, "roadblock-dir")
    fileutil.make_dirs(where)
    stderr_f = StringIO()
    bdb = backupdb.get_backupdb(where, stderr_f)
    self.failUnlessEqual(bdb, None)
    stderr = stderr_f.getvalue()
    self.failUnlessIn("Unable to create/open backupdb file %s" % (where,),
                      stderr)
    self.failUnlessIn("unable to open database file", stderr)
def create_node(config):
    out = config.stdout
    err = config.stderr
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
            print >>err, "To avoid clobbering anything, I am going to quit now."
            print >>err, "Please use a different directory, or empty this one."
            defer.returnValue(-1)
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "client")

    fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
        yield write_node_config(c, config)
        write_client_config(c, config)

    print >>out, "Node created in %s" % quote_local_unicode_path(basedir)
    if not config.get("introducer", ""):
        print >>out, " Please set [client]introducer.furl= in tahoe.cfg!"
        print >>out, " The node cannot connect to a grid without it."
    if not config.get("nickname", ""):
        print >>out, " Please set [node]nickname= in tahoe.cfg"
    defer.returnValue(0)
def test_baddir(self):
    self.skip_if_cannot_daemonize()
    basedir = self.workdir("test_baddir")
    fileutil.make_dirs(basedir)

    d = utils.getProcessOutputAndValue(bintahoe,
                                       args=["--quiet", "start", "--basedir", basedir],
                                       env=os.environ)
    def _cb(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 1)
        self.failUnless("does not look like a node directory" in err, err)
    d.addCallback(_cb)

    def _then_stop_it(res):
        return utils.getProcessOutputAndValue(bintahoe,
                                              args=["--quiet", "stop", "--basedir", basedir],
                                              env=os.environ)
    d.addCallback(_then_stop_it)
    def _cb2(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 2)
        self.failUnless("does not look like a running node directory" in err)
    d.addCallback(_cb2)

    def _then_start_in_bogus_basedir(res):
        not_a_dir = os.path.join(basedir, "bogus")
        return utils.getProcessOutputAndValue(bintahoe,
                                              args=["--quiet", "start", "--basedir", not_a_dir],
                                              env=os.environ)
    d.addCallback(_then_start_in_bogus_basedir)
    def _cb3(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 1)
        self.failUnless("does not look like a directory at all" in err, err)
    d.addCallback(_cb3)
    return d
def test_basic(self):
    self.basedir = basedir = os.path.join("backupdb", "create")
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")
    bdb = self.create_or_skip(dbfile)
    self.failUnless(bdb)
    self.failUnlessEqual(bdb.VERSION, 2)
def test_unicode(self):
    skip_if_cannot_represent_filename(u"f\u00f6\u00f6.txt")
    skip_if_cannot_represent_filename(u"b\u00e5r.txt")

    self.basedir = basedir = os.path.join("backupdb", "unicode")
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")
    bdb = self.create(dbfile)

    self.writeto(u"f\u00f6\u00f6.txt", "foo.txt")
    files = [fn for fn in listdir_unicode(unicode(basedir))
             if fn.endswith(".txt")]
    self.failUnlessEqual(len(files), 1)
    foo_fn = os.path.join(basedir, files[0])
    #print foo_fn, type(foo_fn)

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("foo-cap")

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), "foo-cap")
    self.failUnlessEqual(r.should_check(), False)

    bar_fn = self.writeto(u"b\u00e5r.txt", "bar.txt")
    #print bar_fn, type(bar_fn)

    r = bdb.check_file(bar_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("bar-cap")

    r = bdb.check_file(bar_fn)
    self.failUnlessEqual(r.was_uploaded(), "bar-cap")
    self.failUnlessEqual(r.should_check(), False)
def test_private_config(self):
    basedir = "test_node/test_private_config"
    privdir = os.path.join(basedir, "private")
    fileutil.make_dirs(privdir)
    f = open(os.path.join(privdir, 'already'), 'wt')
    f.write("secret")
    f.close()

    n = TestNode(basedir)
    self.failUnlessEqual(n.get_private_config("already"), "secret")
    self.failUnlessEqual(n.get_private_config("not", "default"), "default")
    self.failUnlessRaises(MissingConfigEntry, n.get_private_config, "not")

    value = n.get_or_create_private_config("new", "start")
    self.failUnlessEqual(value, "start")
    self.failUnlessEqual(n.get_private_config("new"), "start")

    counter = []
    def make_newer():
        counter.append("called")
        return "newer"
    value = n.get_or_create_private_config("newer", make_newer)
    self.failUnlessEqual(len(counter), 1)
    self.failUnlessEqual(value, "newer")
    self.failUnlessEqual(n.get_private_config("newer"), "newer")

    value = n.get_or_create_private_config("newer", make_newer)
    self.failUnlessEqual(len(counter), 1) # don't call unless necessary
    self.failUnlessEqual(value, "newer")
def __init__(self, config, basedir=u"."):
    """
    Initialize the node with the given configuration. Its base directory
    is the current directory by default.
    """
    service.MultiService.__init__(self)

    # ideally, this would only be in _Config (or otherwise abstracted)
    self.basedir = abspath_expanduser_unicode(unicode(basedir))
    # XXX don't write files in ctor!
    fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
    with open(os.path.join(self.basedir, "private", "README"), "w") as f:
        f.write(PRIV_README)

    self.config = config
    self.get_config = config.get_config # XXX stopgap
    self.nickname = config.nickname # XXX stopgap

    self.init_tempdir()
    self.check_privacy()

    self.create_log_tub()
    self.logSource = "Node"
    self.setup_logging()

    self.create_i2p_provider()
    self.create_tor_provider()
    self.init_connections()
    self.set_tub_options()
    self.create_main_tub()
    self.create_control_tub()

    self.log("Node constructed. " + get_package_versions_string())
    iputil.increase_rlimits()
def make_client(self, i, write_config=True):
    clientid = hashutil.tagged_hash("clientid", str(i))[:20]
    clientdir = os.path.join(self.basedir, "clients",
                             idlib.shortnodeid_b2a(clientid))
    fileutil.make_dirs(clientdir)

    tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
    if write_config:
        from twisted.internet import reactor
        _, port_endpoint = self.port_assigner.assign(reactor)
        f = open(tahoe_cfg_path, "w")
        f.write("[node]\n")
        f.write("nickname = client-%d\n" % i)
        f.write("web.port = {}\n".format(port_endpoint))
        f.write("[storage]\n")
        f.write("enabled = false\n")
        f.close()
    else:
        _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)

    c = None
    if i in self.client_config_hooks:
        # this hook can either modify tahoe.cfg, or return an
        # entirely new Client instance
        c = self.client_config_hooks[i](clientdir)

    if not c:
        c = yield create_no_network_client(clientdir)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

    c.nodeid = clientid
    c.short_nodeid = b32encode(clientid).lower()[:8]
    c._servers = self.all_servers # can be updated later
    c.setServiceParent(self)
    defer.returnValue(c)
def start(opts, out=sys.stdout, err=sys.stderr):
    basedir = opts['basedir']
    print >>out, "STARTING", quote_output(basedir)
    if not os.path.isdir(basedir):
        print >>err, "%s does not look like a directory at all" % quote_output(basedir)
        return 1
    for fn in listdir_unicode(basedir):
        if fn.endswith(u".tac"):
            tac = str(fn)
            break
    else:
        print >>err, "%s does not look like a node directory (no .tac file)" % quote_output(basedir)
        return 1
    if "client" in tac:
        nodetype = "client"
    elif "introducer" in tac:
        nodetype = "introducer"
    else:
        nodetype = "unknown (%s)" % tac

    args = ["twistd", "-y", tac]
    if opts["syslog"]:
        args.append("--syslog")
    elif nodetype in ("client", "introducer"):
        fileutil.make_dirs(os.path.join(basedir, "logs"))
        args.extend(["--logfile", os.path.join("logs", "twistd.log")])
    if opts["profile"]:
        args.extend(["--profile=profiling_results.prof", "--savestats",])

    # now we're committed
    os.chdir(basedir)
    from twisted.scripts import twistd
    sys.argv = args
    twistd.run()
def __init__(self, basedir=u"."):
    service.MultiService.__init__(self)
    self.basedir = abspath_expanduser_unicode(unicode(basedir))
    self.config_fname = os.path.join(self.basedir, "tahoe.cfg")
    self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE)

    fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
    with open(os.path.join(self.basedir, "private", "README"), "w") as f:
        f.write(PRIV_README)

    # creates self.config
    self.read_config()
    nickname_utf8 = self.get_config("node", "nickname", "<unspecified>")
    self.nickname = nickname_utf8.decode("utf-8")
    assert type(self.nickname) is unicode

    self.init_tempdir()
    self.check_privacy()

    self.create_log_tub()
    self.logSource = "Node"
    self.setup_logging()

    self.create_i2p_provider()
    self.create_tor_provider()
    self.init_connections()
    self.set_tub_options()
    self.create_main_tub()
    self.create_control_tub()

    self.log("Node constructed. " + get_package_versions_string())
    iputil.increase_rlimits()
def make_client(self, i, write_config=True):
    clientid = hashutil.tagged_hash("clientid", str(i))[:20]
    clientdir = os.path.join(self.basedir, "clients",
                             idlib.shortnodeid_b2a(clientid))
    fileutil.make_dirs(clientdir)

    tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
    if write_config:
        f = open(tahoe_cfg_path, "w")
        f.write("[node]\n")
        f.write("nickname = client-%d\n" % i)
        f.write("web.port = tcp:0:interface=127.0.0.1\n")
        f.write("[storage]\n")
        f.write("enabled = false\n")
        f.close()
    else:
        _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)

    c = None
    if i in self.client_config_hooks:
        # this hook can either modify tahoe.cfg, or return an
        # entirely new Client instance
        c = self.client_config_hooks[i](clientdir)

    if not c:
        c = NoNetworkClient(clientdir)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

    c.nodeid = clientid
    c.short_nodeid = b32encode(clientid).lower()[:8]
    c._servers = self.all_servers # can be updated later
    c.setServiceParent(self)
    return c
def test_directory(self):
    self.basedir = basedir = os.path.join("backupdb", "directory")
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")
    bdb = self.create_or_skip(dbfile)
    self.failUnless(bdb)

    contents = {u"file1": "URI:CHK:blah1",
                u"file2": "URI:CHK:blah2",
                u"dir1": "URI:DIR2-CHK:baz2"}
    r = bdb.check_directory(contents)
    self.failUnless(isinstance(r, backupdb.DirectoryResult))
    self.failIf(r.was_created())
    dircap = "URI:DIR2-CHK:foo1"
    r.did_create(dircap)

    r = bdb.check_directory(contents)
    self.failUnless(r.was_created())
    self.failUnlessEqual(r.was_created(), dircap)
    self.failUnlessEqual(r.should_check(), False)

    # if we spontaneously decide to upload it anyways, nothing should
    # break
    r.did_create(dircap)
    r = bdb.check_directory(contents)
    self.failUnless(r.was_created())
    self.failUnlessEqual(r.was_created(), dircap)
    self.failUnlessEqual(type(r.was_created()), str)
    self.failUnlessEqual(r.should_check(), False)

    bdb.NO_CHECK_BEFORE = 0
    bdb.ALWAYS_CHECK_AFTER = 0.1
    time.sleep(1.0)
    r = bdb.check_directory(contents)
    self.failUnless(r.was_created())
    self.failUnlessEqual(r.was_created(), dircap)
    self.failUnlessEqual(r.should_check(), True)
    r.did_check_healthy("results")

    bdb.NO_CHECK_BEFORE = 200
    bdb.ALWAYS_CHECK_AFTER = 400
    r = bdb.check_directory(contents)
    self.failUnless(r.was_created())
    self.failUnlessEqual(r.was_created(), dircap)
    self.failUnlessEqual(r.should_check(), False)

    contents2 = {u"file1": "URI:CHK:blah1",
                 u"dir1": "URI:DIR2-CHK:baz2"}
    r = bdb.check_directory(contents2)
    self.failIf(r.was_created())

    contents3 = {u"file1": "URI:CHK:blah1",
                 u"file2": "URI:CHK:blah3",
                 u"dir1": "URI:DIR2-CHK:baz2"}
    r = bdb.check_directory(contents3)
    self.failIf(r.was_created())
def setUpHelper(self, basedir):
    fileutil.make_dirs(basedir)
    self.helper = h = offloaded.Helper(basedir,
                                       self.storage_broker,
                                       self.secret_holder,
                                       None, None)
    h.chk_upload_helper_class = CHKUploadHelper_fake
    self.helper_furl = self.tub.registerReference(h)
def __init__(self, storedir, nodeid, reserved_space=0,
             discard_storage=False, readonly_storage=False,
             stats_provider=None,
             expiration_enabled=False,
             expiration_mode="age",
             expiration_override_lease_duration=None,
             expiration_cutoff_date=None,
             expiration_sharetypes=("mutable", "immutable")):
    service.MultiService.__init__(self)
    assert isinstance(nodeid, str)
    assert len(nodeid) == 20
    self.my_nodeid = nodeid
    self.storedir = storedir
    sharedir = os.path.join(storedir, "shares")
    fileutil.make_dirs(sharedir)
    self.sharedir = sharedir
    # we don't actually create the corruption-advisory dir until necessary
    self.corruption_advisory_dir = os.path.join(storedir,
                                                "corruption-advisories")
    self.reserved_space = int(reserved_space)
    self.no_storage = discard_storage
    self.readonly_storage = readonly_storage
    self.stats_provider = stats_provider
    if self.stats_provider:
        self.stats_provider.register_producer(self)
    self.incomingdir = os.path.join(sharedir, 'incoming')
    self._clean_incomplete()
    fileutil.make_dirs(self.incomingdir)
    self._active_writers = weakref.WeakKeyDictionary()
    log.msg("StorageServer created", facility="tahoe.storage")

    if reserved_space:
        if self.get_available_space() is None:
            log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
                    umid="0wZ27w", level=log.UNUSUAL)

    self.latencies = {"allocate": [], # immutable
                      "write": [],
                      "close": [],
                      "read": [],
                      "get": [],
                      "writev": [], # mutable
                      "readv": [],
                      "add-lease": [], # both
                      "renew": [],
                      "cancel": [],
                      }
    self.add_bucket_counter()

    statefile = os.path.join(self.storedir, "lease_checker.state")
    historyfile = os.path.join(self.storedir, "lease_checker.history")
    klass = self.LeaseCheckerClass
    self.lease_checker = klass(self, statefile, historyfile,
                               expiration_enabled, expiration_mode,
                               expiration_override_lease_duration,
                               expiration_cutoff_date,
                               expiration_sharetypes)
    self.lease_checker.setServiceParent(self)
def create_node(config, out=sys.stdout, err=sys.stderr):
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print >>err, "The base directory %s is not empty." % quote_output(basedir)
            print >>err, "To avoid clobbering anything, I am going to quit now."
            print >>err, "Please use a different directory, or empty this one."
            return -1
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    f = open(os.path.join(basedir, "tahoe-client.tac"), "w")
    f.write(client_tac)
    f.close()

    c = open(os.path.join(basedir, "tahoe.cfg"), "w")
    write_node_config(c, config)

    c.write("[client]\n")
    c.write("introducer.furl = %s\n" % config.get("introducer", ""))
    c.write("helper.furl =\n")
    c.write("#key_generator.furl =\n")
    c.write("#stats_gatherer.furl =\n")
    c.write("#shares.needed = 3\n")
    c.write("#shares.happy = 7\n")
    c.write("#shares.total = 10\n")
    c.write("\n")

    boolstr = {True:"true", False:"false"}
    c.write("[storage]\n")
    storage_enabled = not config.get("no-storage", None)
    c.write("enabled = %s\n" % boolstr[storage_enabled])
    c.write("#readonly =\n")
    c.write("#reserved_space =\n")
    c.write("#expire.enabled =\n")
    c.write("#expire.mode =\n")
    c.write("\n")

    c.write("[helper]\n")
    c.write("enabled = false\n")
    c.write("\n")
    c.close()

    from allmydata.util import fileutil
    fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
    print >>out, "Node created in %s" % quote_output(basedir)
    if not config.get("introducer", ""):
        print >>out, " Please set [client]introducer.furl= in tahoe.cfg!"
        print >>out, " The node cannot connect to a grid without it."
    if not config.get("nickname", ""):
        print >>out, " Please set [node]nickname= in tahoe.cfg"
    return 0
def make_server(self, i, readonly=False):
    serverid = hashutil.tagged_hash("serverid", str(i))[:20]
    serverdir = os.path.join(self.basedir, "servers",
                             idlib.shortnodeid_b2a(serverid), "storage")
    fileutil.make_dirs(serverdir)
    ss = StorageServer(serverdir, serverid, stats_provider=SimpleStats(),
                       readonly_storage=readonly)
    ss._no_network_server_number = i
    return ss
def _allocate_slot_share(self, bucketdir, secrets, sharenum,
                         allocated_size, owner_num=0):
    (write_enabler, renew_secret, cancel_secret) = secrets
    my_nodeid = self.my_nodeid
    fileutil.make_dirs(bucketdir)
    filename = os.path.join(bucketdir, "%d" % sharenum)
    share = create_mutable_sharefile(filename, my_nodeid, write_enabler,
                                     self)
    return share
def __init__(self, basedir, pollinterval=1*HOUR, old=1*HOUR):
    service.MultiService.__init__(self)
    self.basedir = basedir
    fileutil.make_dirs(basedir)
    self.old = old
    self.files = weakref.WeakValueDictionary()
    t = internet.TimerService(pollinterval, self.check)
    t.setServiceParent(self)
def test_tahoe_cfg_hash_in_name(self):
    basedir = "test_node/test_cfg_hash_in_name"
    nickname = "Hash#Bang!" # a clever nickname containing a hash
    fileutil.make_dirs(basedir)
    f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt')
    f.write("[node]\n")
    f.write("nickname = %s\n" % (nickname,))
    f.close()
    n = TestNode(basedir)
    self.failUnless(n.nickname == nickname)
def test_port_none_introducer(self):
    basedir = "test_node/test_port_none_introducer"
    fileutil.make_dirs(basedir)
    f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt')
    f.write("[node]\n")
    f.write("tub.port = disabled\n")
    f.write("tub.location = disabled\n")
    f.close()
    e = self.assertRaises(ValueError, IntroducerNode, basedir)
    self.assertIn("we are Introducer, but tub is not listening", str(e))
def test_disabled_but_storage(self):
    basedir = "test_node/test_disabled_but_storage"
    fileutil.make_dirs(basedir)
    f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt')
    f.write(BASE_CONFIG)
    f.write(NOLISTEN)
    f.write(ENABLE_STORAGE)
    f.close()
    e = self.assertRaises(ValueError, Client, basedir)
    self.assertIn("storage is enabled, but tub is not listening", str(e))
def test_exclude_options_unicode(self):
    nice_doc = u"nice_d\u00F8c.lyx"
    try:
        doc_pattern_arg = u"*d\u00F8c*".encode(get_io_encoding())
    except UnicodeEncodeError:
        raise unittest.SkipTest(
            "A non-ASCII command argument could not be encoded on this platform.")

    root_listdir = (u'lib.a', u'_darcs', u'subdir', nice_doc)
    basedir = "cli/Backup/exclude_options_unicode"
    fileutil.make_dirs(basedir)
    nodeurl_path = os.path.join(basedir, 'node.url')
    fileutil.write(nodeurl_path, 'http://example.net:2357/')
    def parse(args):
        return parse_options(basedir, "backup", args)

    # test simple exclude
    backup_options = parse(['--exclude', doc_pattern_arg, 'from', 'to'])
    filtered = list(backup_options.filter_listdir(root_listdir))
    self._check_filtering(filtered, root_listdir,
                          (u'lib.a', u'_darcs', u'subdir'),
                          (nice_doc,))
    # multiple exclude
    backup_options = parse(['--exclude', doc_pattern_arg,
                            '--exclude', 'lib.?', 'from', 'to'])
    filtered = list(backup_options.filter_listdir(root_listdir))
    self._check_filtering(filtered, root_listdir,
                          (u'_darcs', u'subdir'),
                          (nice_doc, u'lib.a'))
    # read exclude patterns from file
    exclusion_string = doc_pattern_arg + "\nlib.?"
    excl_filepath = os.path.join(basedir, 'exclusion')
    fileutil.write(excl_filepath, exclusion_string)
    backup_options = parse(['--exclude-from', excl_filepath, 'from', 'to'])
    filtered = list(backup_options.filter_listdir(root_listdir))
    self._check_filtering(filtered, root_listdir,
                          (u'_darcs', u'subdir'),
                          (nice_doc, u'lib.a'))
    # test that an iterator works too
    backup_options = parse(['--exclude', doc_pattern_arg, 'from', 'to'])
    filtered = list(backup_options.filter_listdir(iter(root_listdir)))
    self._check_filtering(filtered, root_listdir,
                          (u'lib.a', u'_darcs', u'subdir'),
                          (nice_doc,))
def test_baddir(self):
    self.skip_if_cannot_daemonize()
    basedir = self.workdir("test_baddir")
    fileutil.make_dirs(basedir)

    d = self.run_bintahoe(["--quiet", "start", "--basedir", basedir])
    def _cb(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 1)
        self.failUnless("does not look like a node directory" in err, err)
    d.addCallback(_cb)

    def _then_stop_it(res):
        return self.run_bintahoe(["--quiet", "stop", "--basedir", basedir])
    d.addCallback(_then_stop_it)
    def _cb2(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 2)
        self.failUnless("does not look like a running node directory" in err)
    d.addCallback(_cb2)

    def _then_start_in_bogus_basedir(res):
        not_a_dir = os.path.join(basedir, "bogus")
        return self.run_bintahoe(["--quiet", "start", "--basedir", not_a_dir])
    d.addCallback(_then_start_in_bogus_basedir)
    def _cb3(res):
        out, err, rc_or_sig = res
        self.failUnlessEqual(rc_or_sig, 1)
        self.failUnless("does not look like a directory at all" in err, err)
    d.addCallback(_cb3)
    return d
def test_exclude_options(self):
    root_listdir = (u'lib.a', u'_darcs', u'subdir', u'nice_doc.lyx')
    subdir_listdir = (u'another_doc.lyx', u'run_snake_run.py', u'CVS', u'.svn', u'_darcs')
    basedir = "cli/Backup/exclude_options"
    fileutil.make_dirs(basedir)
    nodeurl_path = os.path.join(basedir, 'node.url')
    fileutil.write(nodeurl_path, 'http://example.net:2357/')
    def parse(args):
        return parse_options(basedir, "backup", args)

    # test simple exclude
    backup_options = parse(['--exclude', '*lyx', 'from', 'to'])
    filtered = list(backup_options.filter_listdir(root_listdir))
    self._check_filtering(filtered, root_listdir,
                          (u'lib.a', u'_darcs', u'subdir'),
                          (u'nice_doc.lyx',))
    # multiple exclude
    backup_options = parse(['--exclude', '*lyx',
                            '--exclude', 'lib.?', 'from', 'to'])
    filtered = list(backup_options.filter_listdir(root_listdir))
    self._check_filtering(filtered, root_listdir,
                          (u'_darcs', u'subdir'),
                          (u'nice_doc.lyx', u'lib.a'))
    # vcs metadata exclusion
    backup_options = parse(['--exclude-vcs', 'from', 'to'])
    filtered = list(backup_options.filter_listdir(subdir_listdir))
    self._check_filtering(filtered, subdir_listdir,
                          (u'another_doc.lyx', u'run_snake_run.py',),
                          (u'CVS', u'.svn', u'_darcs'))
    # read exclude patterns from file
    exclusion_string = "_darcs\n*py\n.svn"
    excl_filepath = os.path.join(basedir, 'exclusion')
    fileutil.write(excl_filepath, exclusion_string)
    backup_options = parse(['--exclude-from-utf-8', excl_filepath, 'from', 'to'])
    filtered = list(backup_options.filter_listdir(subdir_listdir))
    self._check_filtering(filtered, subdir_listdir,
                          (u'another_doc.lyx', u'CVS'),
                          (u'.svn', u'_darcs', u'run_snake_run.py'))
    # test BackupConfigurationError
    self.failUnlessRaises(cli.BackupConfigurationError,
                          parse,
                          ['--exclude-from-utf-8', excl_filepath + '.no',
                           'from', 'to'])
    # test that an iterator works too
    backup_options = parse(['--exclude', '*lyx', 'from', 'to'])
    filtered = list(backup_options.filter_listdir(iter(root_listdir)))
    self._check_filtering(filtered, root_listdir,
                          (u'lib.a', u'_darcs', u'subdir'),
                          (u'nice_doc.lyx',))
def read_config(basedir, portnumfile, generated_files=[], _valid_config=None):
    """
    Read and validate configuration.

    :param unicode basedir: directory where configuration data begins

    :param unicode portnumfile: filename fragment for "port number" files

    :param list generated_files: a list of automatically-generated
        configuration files.

    :param ValidConfiguration _valid_config: (internal use, optional) a
        structure defining valid configuration sections and keys

    :returns: :class:`allmydata.node._Config` instance
    """
    basedir = abspath_expanduser_unicode(ensure_text(basedir))
    if _valid_config is None:
        _valid_config = _common_valid_config()

    # complain if there's bad stuff in the config dir
    _error_about_old_config_files(basedir, generated_files)

    # canonicalize the portnum file
    portnumfile = os.path.join(basedir, portnumfile)

    # (try to) read the main config file
    config_fname = os.path.join(basedir, "tahoe.cfg")
    try:
        parser = configutil.get_config(config_fname)
    except EnvironmentError as e:
        if e.errno != errno.ENOENT:
            raise
        # The file is missing, just create empty ConfigParser.
        parser = configutil.get_config_from_string(u"")

    configutil.validate_config(config_fname, parser, _valid_config)

    # make sure we have a private configuration area
    fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)

    return _Config(parser, portnumfile, basedir, config_fname)
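# Hedged usage sketch (not from the original source): how read_config() is
# typically invoked. The base-directory path below is a placeholder, and
# "client.port" is the conventional port-number filename fragment used
# elsewhere in this codebase (see create_no_network_client below).
def example_read_nickname(basedir=u"/tmp/example-node"):
    fileutil.make_dirs(basedir)  # ensure the base directory exists first
    config = read_config(basedir, u"client.port")
    # _Config.get_config(section, option, default) mirrors the Node-level
    # get_config() stopgap seen in the Node constructors above
    return config.get_config("node", "nickname", "<unspecified>")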
def __init__(self, basedir, num_clients=1, num_servers=10,
             client_config_hooks={}):
    service.MultiService.__init__(self)
    self.basedir = basedir
    fileutil.make_dirs(basedir)

    self.servers_by_number = {} # maps to StorageServer instance
    self.wrappers_by_id = {} # maps to wrapped StorageServer instance
    self.proxies_by_id = {} # maps to IServer on which .rref is a wrapped
                            # StorageServer
    self.clients = []

    for i in range(num_servers):
        ss = self.make_server(i)
        self.add_server(i, ss)
    self.rebuild_serverlist()

    for i in range(num_clients):
        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
        clientdir = os.path.join(basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)
        f = open(os.path.join(clientdir, "tahoe.cfg"), "w")
        f.write("[node]\n")
        f.write("nickname = client-%d\n" % i)
        f.write("web.port = tcp:0:interface=127.0.0.1\n")
        f.write("[storage]\n")
        f.write("enabled = false\n")
        f.close()
        c = None
        if i in client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = client_config_hooks[i](clientdir)
        if not c:
            c = NoNetworkClient(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers # can be updated later
        c.setServiceParent(self)
        self.clients.append(c)
def copy_sdmf_shares(self):
    # We'll basically be short-circuiting the upload process.
    servernums = list(self.g.servers_by_number.keys())
    assert len(servernums) == 10

    assignments = list(zip(self.sdmf_old_shares.keys(), servernums))
    # Get the storage index.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()

    # Now execute each assignment by writing the storage.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)
    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
def testing_tub(config_data=''):
    """
    Creates a 'main' Tub for testing purposes, from config data
    """
    from twisted.internet import reactor
    basedir = 'dummy_basedir'
    config = config_from_string(basedir, 'DEFAULT_PORTNUMFILE_BLANK', config_data)
    fileutil.make_dirs(os.path.join(basedir, 'private'))

    i2p_provider = create_i2p_provider(reactor, config)
    tor_provider = create_tor_provider(reactor, config)
    handlers = create_connection_handlers(reactor, config, i2p_provider, tor_provider)
    default_connection_handlers, foolscap_connection_handlers = handlers
    tub_options = create_tub_options(config)

    main_tub = create_main_tub(
        config, tub_options, default_connection_handlers,
        foolscap_connection_handlers, i2p_provider, tor_provider,
        cert_filename='DEFAULT_CERTFILE_BLANK'
    )
    return main_tub
def __init__(self, basedir, num_clients=1, num_servers=10,
             client_config_hooks={}):
    service.MultiService.__init__(self)
    self.basedir = basedir
    fileutil.make_dirs(basedir)

    self.servers_by_number = {} # maps to StorageServer instance
    self.wrappers_by_id = {} # maps to wrapped StorageServer instance
    self.proxies_by_id = {} # maps to IServer on which .rref is a wrapped
                            # StorageServer
    self.clients = []
    self.client_config_hooks = client_config_hooks

    for i in range(num_servers):
        ss = self.make_server(i)
        self.add_server(i, ss)
    self.rebuild_serverlist()

    for i in range(num_clients):
        d = self.make_client(i)
        d.addCallback(lambda c: self.clients.append(c))
def remote_advise_corrupt_share(self, share_type, storage_index, shnum,
                                reason):
    fileutil.make_dirs(self.corruption_advisory_dir)
    now = time_format.iso_utc(sep="T")
    si_s = si_b2a(storage_index)
    # windows can't handle colons in the filename
    fn = os.path.join(self.corruption_advisory_dir,
                      "%s--%s-%d" % (now, si_s, shnum)).replace(":","")
    f = open(fn, "w")
    f.write("report: Share Corruption\n")
    f.write("type: %s\n" % share_type)
    f.write("storage_index: %s\n" % si_s)
    f.write("share_number: %d\n" % shnum)
    f.write("\n")
    f.write(reason)
    f.write("\n")
    f.close()
    log.msg(format=("client claims corruption in (%(share_type)s) " +
                    "%(si)s-%(shnum)d: %(reason)s"),
            share_type=share_type, si=si_s, shnum=shnum, reason=reason,
            level=log.SCARY, umid="SGx2fA")
    return None
def test_make_dirs_with_absolute_mode(self):
    if sys.platform == 'win32':
        raise unittest.SkipTest("Permissions don't work the same on windows.")

    workdir = fileutil.abspath_expanduser_unicode(u"test_make_dirs_with_absolute_mode")
    fileutil.make_dirs(workdir)
    abspath = fileutil.abspath_expanduser_unicode(u"a/b/c/d", base=workdir)
    fileutil.make_dirs_with_absolute_mode(workdir, abspath, 0o766)
    new_mode = os.stat(os.path.join(workdir, "a", "b", "c", "d")).st_mode & 0o777
    self.failUnlessEqual(new_mode, 0o766)
    new_mode = os.stat(os.path.join(workdir, "a", "b", "c")).st_mode & 0o777
    self.failUnlessEqual(new_mode, 0o766)
    new_mode = os.stat(os.path.join(workdir, "a", "b")).st_mode & 0o777
    self.failUnlessEqual(new_mode, 0o766)
    new_mode = os.stat(os.path.join(workdir, "a")).st_mode & 0o777
    self.failUnlessEqual(new_mode, 0o766)
    new_mode = os.stat(workdir).st_mode & 0o777
    self.failIfEqual(new_mode, 0o766)
def __init__(self, basedir, storage_broker, secret_holder,
             stats_provider, history):
    self._basedir = basedir
    self._storage_broker = storage_broker
    self._secret_holder = secret_holder

    self._chk_incoming = os.path.join(basedir, "CHK_incoming")
    self._chk_encoding = os.path.join(basedir, "CHK_encoding")
    fileutil.make_dirs(self._chk_incoming)
    fileutil.make_dirs(self._chk_encoding)
    self._active_uploads = {}
    self._all_uploads = weakref.WeakKeyDictionary() # for debugging

    self.stats_provider = stats_provider
    if stats_provider:
        stats_provider.register_producer(self)
    self._counters = {"chk_upload_helper.upload_requests": 0,
                      "chk_upload_helper.upload_already_present": 0,
                      "chk_upload_helper.upload_need_upload": 0,
                      "chk_upload_helper.resumes": 0,
                      "chk_upload_helper.fetched_bytes": 0,
                      "chk_upload_helper.encoded_bytes": 0,
                      }
    self._history = history
def remote_close(self):
    precondition(not self.closed)
    start = time.time()

    fileutil.make_dirs(os.path.dirname(self.finalhome))
    fileutil.rename(self.incominghome, self.finalhome)
    try:
        # self.incominghome is like storage/shares/incoming/ab/abcde/4 .
        # We try to delete the parent (.../ab/abcde) to avoid leaving
        # these directories lying around forever, but the delete might
        # fail if we're working on another share for the same storage
        # index (like ab/abcde/5). The alternative approach would be to
        # use a hierarchy of objects (PrefixHolder, BucketHolder,
        # ShareWriter), each of which is responsible for a single
        # directory on disk, and have them use reference counting of
        # their children to know when they should do the rmdir. This
        # approach is simpler, but relies on os.rmdir refusing to delete
        # a non-empty directory. Do *not* use fileutil.rm_dir() here!
        os.rmdir(os.path.dirname(self.incominghome))
        # we also delete the grandparent (prefix) directory, .../ab ,
        # again to avoid leaving directories lying around. This might
        # fail if there is another bucket open that shares a prefix (like
        # ab/abfff).
        os.rmdir(os.path.dirname(os.path.dirname(self.incominghome)))
        # we leave the great-grandparent (incoming/) directory in place.
    except EnvironmentError:
        # ignore the "can't rmdir because the directory is not empty"
        # exceptions, those are normal consequences of the
        # above-mentioned conditions.
        pass
    self._sharefile = None
    self.closed = True
    self._canary.dontNotifyOnDisconnect(self._disconnect_marker)

    filelen = os.stat(self.finalhome)[stat.ST_SIZE]
    self.ss.bucket_writer_closed(self, filelen)
    self.ss.add_latency("close", time.time() - start)
    self.ss.count("close")
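# Hedged sketch (illustration only, not part of the original source): the
# cleanup above leans on os.rmdir() raising an error for non-empty
# directories, which is what makes it safe while sibling shares (like
# ab/abcde/5) are still being written. A hypothetical standalone form of
# the same idiom:
def prune_empty_parents(child_path, levels=2):
    import os
    path = child_path
    for _ in range(levels):
        path = os.path.dirname(path)
        try:
            os.rmdir(path)  # succeeds only if the directory is now empty
        except EnvironmentError:
            return  # still in use by another share; stop pruning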
def create_no_network_client(basedir):
    """
    :return: a Deferred yielding an instance of _Client subclass which
        does no actual networking but has the same API.
    """
    basedir = abspath_expanduser_unicode(unicode(basedir))
    fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)

    from allmydata.client import read_config
    config = read_config(basedir, u'client.port')
    storage_broker = NoNetworkStorageBroker()
    client = _NoNetworkClient(
        config,
        main_tub=None,
        i2p_provider=None,
        tor_provider=None,
        introducer_clients=[],
        storage_farm_broker=storage_broker,
    )
    # this is a (pre-existing) reference-cycle and also a bad idea, see:
    # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2949
    storage_broker.client = client
    return defer.succeed(client)
def test_unicode(self):
    self.skip_if_cannot_represent_filename(u"f\u00f6\u00f6.txt")
    self.skip_if_cannot_represent_filename(u"b\u00e5r.txt")

    self.basedir = basedir = os.path.join("backupdb", "unicode")
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")
    bdb = self.create_or_skip(dbfile)
    self.failUnless(bdb)

    self.writeto(u"f\u00f6\u00f6.txt", "foo.txt")
    files = [fn for fn in listdir_unicode(unicode(basedir))
             if fn.endswith(".txt")]
    self.failUnlessEqual(len(files), 1)
    foo_fn = os.path.join(basedir, files[0])
    #print foo_fn, type(foo_fn)

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("foo-cap")

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), "foo-cap")
    self.failUnlessEqual(r.should_check(), False)

    bar_fn = self.writeto(u"b\u00e5r.txt", "bar.txt")
    #print bar_fn, type(bar_fn)

    r = bdb.check_file(bar_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("bar-cap")

    r = bdb.check_file(bar_fn)
    self.failUnlessEqual(r.was_uploaded(), "bar-cap")
    self.failUnlessEqual(r.should_check(), False)
def test_1654(self):
    # test that the Retrieve object unconditionally verifies the block
    # hash tree root for mutable shares. The failure mode is that
    # carefully crafted shares can cause undetected corruption (the
    # retrieve appears to finish successfully, but the result is
    # corrupted). When fixed, these shares always cause a
    # CorruptShareError, which results in NotEnoughSharesError in this
    # 2-of-2 file.
    self.basedir = "mutable/Problems/test_1654"
    self.set_up_grid(num_servers=2)
    cap = uri.from_string(TEST_1654_CAP)
    si = cap.get_storage_index()

    for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
        sharedata = base64.b64decode(share)
        storedir = self.get_serverdir(shnum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        fileutil.write(os.path.join(storage_path, "%d" % shnum),
                       sharedata)

    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(TEST_1654_CAP)
    # to exercise the problem correctly, we must ensure that sh0 is
    # processed first, and sh1 second. NoNetworkGrid has facilities to
    # stall the first request from a single server, but it's not
    # currently easy to extend that to stall the second request (mutable
    # retrievals will see two: first the mapupdate, then the fetch).
    # However, repeated executions of this test without the #1654 fix
    # suggest that we're failing reliably even without explicit stalls,
    # probably because the servers are queried in a fixed order. So I'm
    # ok with relying upon that.
    d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
                        "ran out of servers",
                        n.download_best_version)
    return d
def create_introducer(config):
    out = config.stdout
    err = config.stderr
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print("The base directory %s is not empty." % quote_local_unicode_path(basedir), file=err)
            print("To avoid clobbering anything, I am going to quit now.", file=err)
            print("Please use a different directory, or empty this one.", file=err)
            defer.returnValue(-1)
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "introducer")

    fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
        yield write_node_config(c, config)

    print("Introducer created in %s" % quote_local_unicode_path(basedir), file=out)
    defer.returnValue(0)
def __init__(self, filename, max_size=None, create=False):
    """ If max_size is not None then I won't allow more than max_size
    to be written to me. If create=True then max_size must not be
    None. """
    precondition((max_size is not None) or (not create), max_size, create)
    self.home = filename
    self._max_size = max_size
    if create:
        # touch the file, so later callers will see that we're working on
        # it. Also construct the metadata.
        assert not os.path.exists(self.home)
        fileutil.make_dirs(os.path.dirname(self.home))
        f = open(self.home, 'wb')
        # The second field -- the four-byte share data length -- is no
        # longer used as of Tahoe v1.3.0, but we continue to write it in
        # there in case someone downgrades a storage server from >=
        # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
        # server to another, etc. We do saturation -- a share data length
        # larger than 2**32-1 (what can fit into the field) is marked as
        # the largest length that can fit into the field. That way, even
        # if this does happen, the old < v1.3.0 server will still allow
        # clients to read the first part of the share.
        f.write(struct.pack(">LLL", 1, min(2**32 - 1, max_size), 0))
        f.close()
        self._lease_offset = max_size + 0x0c
        self._num_leases = 0
    else:
        f = open(self.home, 'rb')
        filesize = os.path.getsize(self.home)
        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
        f.close()
        if version != 1:
            msg = "sharefile %s had version %d but we wanted 1" % \
                  (filename, version)
            raise UnknownImmutableContainerVersionError(msg)
        self._num_leases = num_leases
        self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
    self._data_offset = 0xc
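# Hedged sketch (not from the original source): the 12-byte header written
# above can be read back with the matching struct format. This hypothetical
# helper exists only to make the on-disk layout explicit.
def read_share_header(filename):
    import struct
    with open(filename, 'rb') as f:
        # ">LLL" mirrors the pack call above: container version, saturated
        # share-data length (unused since Tahoe v1.3.0), and lease count.
        return struct.unpack(">LLL", f.read(0xc))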
def test_exclude_from_tilde_expansion(self):
    basedir = "cli/Backup/exclude_from_tilde_expansion"
    fileutil.make_dirs(basedir)
    nodeurl_path = os.path.join(basedir, 'node.url')
    fileutil.write(nodeurl_path, 'http://example.net:2357/')

    # ensure that tilde expansion is performed on exclude-from argument
    exclude_file = u'~/.tahoe/excludes.dummy'

    ns = Namespace()
    ns.called = False
    def call_file(name, *args):
        ns.called = True
        self.failUnlessEqual(name, abspath_expanduser_unicode(exclude_file))
        return StringIO()

    patcher = MonkeyPatcher((__builtin__, 'file', call_file))
    patcher.runWithPatches(parse_options, basedir, "backup",
                           ['--exclude-from', unicode_to_argv(exclude_file),
                            'from', 'to'])
    self.failUnless(ns.called)
def _test_location(self, basedir, expected_addresses, tub_port=None,
                   tub_location=None, local_addresses=None):
    fileutil.make_dirs(basedir)
    f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt')
    f.write("[node]\n")
    if tub_port:
        f.write("tub.port = %d\n" % (tub_port,))
    if tub_location is not None:
        f.write("tub.location = %s\n" % (tub_location,))
    f.close()

    if local_addresses:
        self.patch(iputil, 'get_local_addresses_sync',
                   lambda: local_addresses)

    n = TestNode(basedir)
    n.setServiceParent(self.parent)
    furl = n.tub.registerReference(n)
    for address in expected_addresses:
        self.failUnlessIn(address, furl)
def test_create_long_path(self):
    """
    Even for paths with total length greater than 260 bytes,
    ``fileutil.abspath_expanduser_unicode`` produces a path on which
    other path-related APIs can operate.

    https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
    documents certain Windows-specific path length limitations this test
    is specifically intended to demonstrate can be overcome.
    """
    workdir = u"test_create_long_path"
    fileutil.make_dirs(workdir)
    base_path = fileutil.abspath_expanduser_unicode(workdir)
    base_length = len(base_path)

    # Construct a path /just/ long enough to exercise the important case.
    # It would be nice if we could just use a seemingly globally valid
    # long file name (the `x...` portion) here - for example, a name 255
    # bytes long - and a previous version of this test did just that.
    # However, aufs imposes a 242 byte length limit on file names. Most
    # other POSIX filesystems do allow names up to 255 bytes. It's not
    # clear there's anything we can *do* about lower limits, though, and
    # POSIX.1-2017 (and earlier) only requires that the maximum be at
    # least 14 (!!!) bytes.
    long_path = os.path.join(base_path, u'x' * (261 - base_length))

    def _cleanup():
        fileutil.remove(long_path)
    self.addCleanup(_cleanup)

    fileutil.write(long_path, b"test")
    self.failUnless(os.path.exists(long_path))
    self.failUnlessEqual(fileutil.read(long_path), b"test")
    _cleanup()
    self.failIf(os.path.exists(long_path))
def test_maker(self):
    basedir = "client/NodeMaker/maker"
    fileutil.make_dirs(basedir)
    f = open(os.path.join(basedir, "tahoe.cfg"), "w")
    f.write(BASECONFIG)
    f.close()
    c = client.Client(basedir)

    n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failUnless(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failIf(n.is_mutable())

    n = c.create_node_from_uri("URI:LIT:n5xgk")
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failUnless(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failIf(n.is_mutable())

    n = c.create_node_from_uri("URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failUnless(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failIf(n.is_readonly())
    self.failUnless(n.is_mutable())

    n = c.create_node_from_uri("URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failUnless(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failUnless(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failUnless(n.is_mutable())

    n = c.create_node_from_uri("URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failUnless(IDirectoryNode.providedBy(n))
    self.failIf(n.is_readonly())
    self.failUnless(n.is_mutable())

    n = c.create_node_from_uri("URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failUnless(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_readonly())
    self.failUnless(n.is_mutable())

    unknown_rw = "lafs://from_the_future"
    unknown_ro = "lafs://readonly_from_the_future"
    n = c.create_node_from_uri(unknown_rw, unknown_ro)
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_unknown())
    self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)

    # Note: it isn't that we *intend* to deploy non-ASCII caps in
    # the future, it is that we want to make sure older Tahoe-LAFS
    # versions wouldn't choke on them if we were to do so. See
    # #1051 and wiki:NewCapDesign for details.
    unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
    unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
    n = c.create_node_from_uri(unknown_rw, unknown_ro)
    self.failUnless(IFilesystemNode.providedBy(n))
    self.failIf(IFileNode.providedBy(n))
    self.failIf(IImmutableFileNode.providedBy(n))
    self.failIf(IMutableFileNode.providedBy(n))
    self.failIf(IDirectoryNode.providedBy(n))
    self.failUnless(n.is_unknown())
    self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
    self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)
def writeto(self, filename, data):
    fn = os.path.join(self.basedir, unicode(filename))
    parentdir = os.path.dirname(fn)
    fileutil.make_dirs(parentdir)
    fileutil.write(fn, data)
    return fn
def test_check(self):
    self.basedir = basedir = os.path.join("backupdb", "check")
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")
    bdb = self.create_or_skip(dbfile)
    self.failUnless(bdb)

    foo_fn = self.writeto("foo.txt", "foo.txt")
    blah_fn = self.writeto("bar/blah.txt", "blah.txt")

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("foo-cap")

    r = bdb.check_file(blah_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("blah-cap")

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), "foo-cap")
    self.failUnlessEqual(type(r.was_uploaded()), str)
    self.failUnlessEqual(r.should_check(), False)

    time.sleep(1.0) # make sure the timestamp changes
    self.writeto("foo.txt", "NEW")

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("new-cap")

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), "new-cap")
    self.failUnlessEqual(r.should_check(), False)
    # if we spontaneously decide to upload it anyways, nothing should
    # break
    r.did_upload("new-cap")

    r = bdb.check_file(foo_fn, use_timestamps=False)
    self.failUnlessEqual(r.was_uploaded(), False)
    r.did_upload("new-cap")

    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), "new-cap")
    self.failUnlessEqual(r.should_check(), False)

    bdb.NO_CHECK_BEFORE = 0
    bdb.ALWAYS_CHECK_AFTER = 0.1

    r = bdb.check_file(blah_fn)
    self.failUnlessEqual(r.was_uploaded(), "blah-cap")
    self.failUnlessEqual(r.should_check(), True)
    r.did_check_healthy("results") # we know they're ignored for now

    bdb.NO_CHECK_BEFORE = 200
    bdb.ALWAYS_CHECK_AFTER = 400

    r = bdb.check_file(blah_fn)
    self.failUnlessEqual(r.was_uploaded(), "blah-cap")
    self.failUnlessEqual(r.should_check(), False)

    os.unlink(os.path.join(basedir, "foo.txt"))
    fileutil.make_dirs(os.path.join(basedir, "foo.txt")) # file becomes dir
    r = bdb.check_file(foo_fn)
    self.failUnlessEqual(r.was_uploaded(), False)
def test_cp_overwrite_readonly_mutable_file(self):
    # tahoe cp should print an error when asked to overwrite a
    # mutable file that it can't overwrite.
    self.basedir = "cli/Cp/overwrite_readonly_mutable_file"
    self.set_up_grid(oneshare=True)

    # This is our initial file. We'll link its readcap into the
    # tahoe: alias.
    test_file_path = os.path.join(self.basedir, "test_file.txt")
    test_file_contents = "This is a test file."
    fileutil.write(test_file_path, test_file_contents)

    # This is our replacement file. We'll try and fail to upload it
    # over the readcap that we linked into the tahoe: alias.
    replacement_file_path = os.path.join(self.basedir, "replacement.txt")
    replacement_file_contents = "These are new contents."
    fileutil.write(replacement_file_path, replacement_file_contents)

    d = self.do_cli("create-alias", "tahoe:")
    d.addCallback(lambda ignored:
        self.do_cli("put", "--mutable", test_file_path))
    def _get_test_uri((rc, out, err)):
        self.failUnlessEqual(rc, 0)
        # this should be a write uri
        self._test_write_uri = out
    d.addCallback(_get_test_uri)
    d.addCallback(lambda ignored:
        self.do_cli("ls", "--json", self._test_write_uri))
    def _process_test_json((rc, out, err)):
        self.failUnlessEqual(rc, 0)
        filetype, data = json.loads(out)
        self.failUnlessEqual(filetype, "filenode")
        self.failUnless(data['mutable'])
        self.failUnlessIn("ro_uri", data)
        self._test_read_uri = to_str(data["ro_uri"])
    d.addCallback(_process_test_json)
    # Now we'll link the readonly URI into the tahoe: alias.
    d.addCallback(lambda ignored:
        self.do_cli("ln", self._test_read_uri, "tahoe:test_file.txt"))
    d.addCallback(lambda (rc, out, err):
        self.failUnlessEqual(rc, 0))
    # Let's grab the json of that to make sure that we did it right.
    d.addCallback(lambda ignored:
        self.do_cli("ls", "--json", "tahoe:"))
    def _process_tahoe_json((rc, out, err)):
        self.failUnlessEqual(rc, 0)
        filetype, data = json.loads(out)
        self.failUnlessEqual(filetype, "dirnode")
        self.failUnlessIn("children", data)
        kiddata = data['children']
        self.failUnlessIn("test_file.txt", kiddata)
        testtype, testdata = kiddata['test_file.txt']
        self.failUnlessEqual(testtype, "filenode")
        self.failUnless(testdata['mutable'])
        self.failUnlessIn("ro_uri", testdata)
        self.failUnlessEqual(to_str(testdata["ro_uri"]), self._test_read_uri)
        self.failIfIn("rw_uri", testdata)
    d.addCallback(_process_tahoe_json)
    # Okay, now we're going to try uploading another mutable file in
    # place of that one. We should get an error.
    d.addCallback(lambda ignored:
        self.do_cli("cp", replacement_file_path, "tahoe:test_file.txt"))
    def _check_error_message((rc, out, err)):
        self.failUnlessEqual(rc, 1)
        self.failUnlessIn("replace or update requested with read-only cap", err)
    d.addCallback(_check_error_message)
    # Make extra sure that that didn't work.
    d.addCallback(lambda ignored:
        self.do_cli("get", "tahoe:test_file.txt"))
    d.addCallback(lambda (rc, out, err):
        self.failUnlessEqual(out, test_file_contents))
    d.addCallback(lambda ignored:
        self.do_cli("get", self._test_read_uri))
    d.addCallback(lambda (rc, out, err):
        self.failUnlessEqual(out, test_file_contents))
    # Now we'll do it without an explicit destination.
    d.addCallback(lambda ignored:
        self.do_cli("cp", test_file_path, "tahoe:"))
    d.addCallback(_check_error_message)
    d.addCallback(lambda ignored:
        self.do_cli("get", "tahoe:test_file.txt"))
    d.addCallback(lambda (rc, out, err):
        self.failUnlessEqual(out, test_file_contents))
    d.addCallback(lambda ignored:
        self.do_cli("get", self._test_read_uri))
    d.addCallback(lambda (rc, out, err):
        self.failUnlessEqual(out, test_file_contents))
    # Now we'll link a readonly file into a subdirectory. (The directory
    # must be named "test" so the later ln and recursive cp below both
    # target it.)
    d.addCallback(lambda ignored:
        self.do_cli("mkdir", "tahoe:test"))
    d.addCallback(lambda (rc, out, err):
        self.failUnlessEqual(rc, 0))
    d.addCallback(lambda ignored:
        self.do_cli("ln", self._test_read_uri, "tahoe:test/file2.txt"))
    d.addCallback(lambda (rc, out, err):
        self.failUnlessEqual(rc, 0))

    test_dir_path = os.path.join(self.basedir, "test")
    fileutil.make_dirs(test_dir_path)
    for f in ("file1.txt", "file2.txt"):
        fileutil.write(os.path.join(test_dir_path, f), f * 10000)

    d.addCallback(lambda ignored:
        self.do_cli("cp", "-r", test_dir_path, "tahoe:"))
    d.addCallback(_check_error_message)
    d.addCallback(lambda ignored:
        self.do_cli("ls", "--json", "tahoe:test"))
    def _got_testdir_json((rc, out, err)):
        self.failUnlessEqual(rc, 0)
        filetype, data = json.loads(out)
        self.failUnlessEqual(filetype, "dirnode")
        self.failUnlessIn("children", data)
        childdata = data['children']
        self.failUnlessIn("file2.txt", childdata)
        file2type, file2data = childdata['file2.txt']
        self.failUnlessEqual(file2type, "filenode")
        self.failUnless(file2data['mutable'])
        self.failUnlessIn("ro_uri", file2data)
        self.failUnlessEqual(to_str(file2data["ro_uri"]), self._test_read_uri)
        self.failIfIn("rw_uri", file2data)
    d.addCallback(_got_testdir_json)
    return d
def _build_local_directory(ignored):
    test2_path = os.path.join(self.basedir, "test2")
    fileutil.make_dirs(test2_path)
    for fn in ("mutable1", "mutable2", "imm1", "imm3"):
        fileutil.write(os.path.join(test2_path, fn), fn * 1000)
    self.test2_path = test2_path
def mkdir(self, basedir, path, mode=0o777):
    fn = os.path.join(basedir, path)
    fileutil.make_dirs(fn, mode)
def test_basic(self):
    self.basedir = basedir = os.path.join("backupdb", "create")
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")
    bdb = self.create(dbfile)
    self.failUnlessEqual(bdb.VERSION, 2)