def test_fail(self):
    """get_backupdb() must return None and explain the failure on stderr
    when the given path cannot be used as a sqlite database.

    Fix: assert on error-message substrings that are stable across
    sqlite releases and Python implementations, instead of the full
    version-specific text (sqlite 3.20 changed "file is encrypted or is
    not a database" to "file is not a database"; PyPy words the
    directory-open failure differently).
    """
    import sys  # local import: only needed for the PyPy check below
    self.basedir = basedir = os.path.join("backupdb", "fail")
    fileutil.make_dirs(basedir)

    # put a non-DB file in the way
    not_a_db = ("I do not look like a sqlite database\n" +
                "I'M NOT" * 1000)  # OS-X sqlite-2.3.2 takes some convincing
    self.writeto("not-a-database", not_a_db)
    stderr_f = StringIO()
    bdb = backupdb.get_backupdb(os.path.join(basedir, "not-a-database"),
                                stderr_f)
    self.failUnlessEqual(bdb, None)
    stderr = stderr_f.getvalue()
    self.failUnlessIn("backupdb file is unusable", stderr)
    # sqlite-3.19.3 says "file is encrypted or is not a database";
    # sqlite-3.20.0 says "file is not a database" -- match the common tail
    self.failUnlessIn("is not a database", stderr)

    # put a directory in the way, to exercise a different error path
    where = os.path.join(basedir, "roadblock-dir")
    fileutil.make_dirs(where)
    stderr_f = StringIO()
    bdb = backupdb.get_backupdb(where, stderr_f)
    self.failUnlessEqual(bdb, None)
    stderr = stderr_f.getvalue()
    self.failUnlessIn("Unable to create/open backupdb file %s" % (where,),
                      stderr)
    # the error-message is different under PyPy
    if 'pypy' in sys.version.lower():
        self.failUnlessIn("Could not open database", stderr)
    else:
        self.failUnlessIn("unable to open database file", stderr)
def test_fail(self):
    """get_backupdb() must return None (and complain on stderr) when the
    target path cannot be used as a sqlite database.

    Both failure modes are tolerated silently when the stderr output
    shows that no python sqlite library could be imported at all.
    """
    self.basedir = basedir = os.path.join("backupdb", "fail")
    fileutil.make_dirs(basedir)

    # put a non-DB file in the way
    not_a_db = ("I do not look like a sqlite database\n" +
                "I'M NOT" * 1000)  # OS-X sqlite-2.3.2 takes some convincing
    self.writeto("not-a-database", not_a_db)
    stderr_f = StringIO()
    bdb = backupdb.get_backupdb(os.path.join(basedir, "not-a-database"),
                                stderr_f)
    self.failUnlessEqual(bdb, None)
    stderr = stderr_f.getvalue()
    if "I was unable to import a python sqlite library" not in stderr:
        self.failUnless("backupdb file is unusable" in stderr, stderr)
        self.failUnless("file is encrypted or is not a database" in stderr,
                        stderr)

    # put a directory in the way, to exercise a different error path
    where = os.path.join(basedir, "roadblock-dir")
    fileutil.make_dirs(where)
    stderr_f = StringIO()
    bdb = backupdb.get_backupdb(where, stderr_f)
    self.failUnlessEqual(bdb, None)
    stderr = stderr_f.getvalue()
    if "I was unable to import a python sqlite library" not in stderr:
        self.failUnless(("Unable to create/open backupdb file %s" % where) in stderr,
                        stderr)
        self.failUnless("unable to open database file" in stderr, stderr)
def test_fail(self):
    """Opening a backupdb must fail cleanly (return None, diagnose on
    stderr) when the path holds junk bytes or is a directory."""
    self.basedir = basedir = os.path.join("backupdb", "fail")
    fileutil.make_dirs(basedir)

    # put a non-DB file in the way
    not_a_db = ("I do not look like a sqlite database\n" +
                "I'M NOT" * 1000)  # OS-X sqlite-2.3.2 takes some convincing
    self.writeto("not-a-database", not_a_db)
    captured = StringIO()
    bdb = backupdb.get_backupdb(os.path.join(basedir, "not-a-database"),
                                captured)
    self.failUnlessEqual(bdb, None)
    message = captured.getvalue()
    self.failUnlessIn("backupdb file is unusable", message)
    # sqlite-3.19.3 says "file is encrypted or is not a database"
    # sqlite-3.20.0 says "file is not a database"
    self.failUnlessIn("is not a database", message)

    # put a directory in the way, to exercise a different error path
    where = os.path.join(basedir, "roadblock-dir")
    fileutil.make_dirs(where)
    captured = StringIO()
    bdb = backupdb.get_backupdb(where, captured)
    self.failUnlessEqual(bdb, None)
    message = captured.getvalue()
    # the error-message is different under PyPy ... not sure why?
    expected = ("Could not open database"
                if 'pypy' in sys.version.lower()
                else "unable to open database file")
    self.failUnlessIn(expected, message)
def _reset_last_checked(res):
    # Reach directly into the backupdb and zero every last_checked
    # timestamp, forcing the next backup run to re-check all files
    # and directories instead of trusting its cache.
    db_path = self.get_client_config().get_private_path("backupdb.sqlite")
    self.failUnless(os.path.exists(db_path), db_path)
    db = backupdb.get_backupdb(db_path)
    db.cursor.execute("UPDATE last_upload SET last_checked=0")
    db.cursor.execute("UPDATE directories SET last_checked=0")
    db.connection.commit()
def run(self):
    """Entry point for the 'tahoe backup' CLI command (Python 2 revision).

    Resolves verbosity flags, opens the node-private backupdb, and
    resolves the destination alias. Returns 1 on any setup failure.
    NOTE(review): only the setup/alias-resolution portion of this
    revision is visible here; the body appears truncated in this view.
    """
    options = self.options
    nodeurl = options['node-url']
    # verbosity: 0 == --quiet, 1 == default, 2 == --verbose
    self.verbosity = 1
    if options['quiet']:
        self.verbosity = 0
    if options['verbose']:
        self.verbosity = 2
    stdout = options.stdout
    stderr = options.stderr
    start_timestamp = datetime.datetime.now()
    # the backupdb lives in the node's private/ directory
    self.backupdb = None
    bdbfile = os.path.join(options["node-directory"],
                           "private", "backupdb.sqlite")
    bdbfile = abspath_expanduser_unicode(bdbfile)
    self.backupdb = backupdb.get_backupdb(bdbfile, stderr)
    if not self.backupdb:
        # get_backupdb already wrote the detailed reason to stderr
        print >>stderr, "ERROR: Unable to load backup db."
        return 1
    try:
        # split "alias:path" destination into a root cap plus sub-path
        rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS)
    except UnknownAliasError, e:
        e.display(stderr)
        return 1
def create_or_skip(self, dbfile):
    """Open (creating if necessary) a backupdb at *dbfile*.

    Raises SkipTest when the open failed because no python sqlite
    library could be imported; otherwise returns whatever
    get_backupdb() produced (possibly a falsy value).
    """
    captured = StringIO()
    bdb = backupdb.get_backupdb(dbfile, stderr=captured)
    if bdb:
        return bdb
    if "I was unable to import a python sqlite library" in captured.getvalue():
        raise unittest.SkipTest("sqlite unavailable, skipping test")
    return bdb
def run(self):
    """Entry point for the 'tahoe backup' CLI command (Python 2 revision).

    Sets up verbosity, opens the node-private backupdb, and resolves
    the destination alias; returns 1 if any of that fails.
    NOTE(review): this revision is cut off after alias resolution in
    the visible source.
    """
    options = self.options
    nodeurl = options['node-url']
    # verbosity: 0 == --quiet, 1 == default, 2 == --verbose
    self.verbosity = 1
    if options['quiet']:
        self.verbosity = 0
    if options['verbose']:
        self.verbosity = 2
    stdout = options.stdout
    stderr = options.stderr
    start_timestamp = datetime.datetime.now()
    # the backupdb lives in the node's private/ directory
    self.backupdb = None
    bdbfile = os.path.join(options["node-directory"],
                           "private", "backupdb.sqlite")
    bdbfile = abspath_expanduser_unicode(bdbfile)
    self.backupdb = backupdb.get_backupdb(bdbfile, stderr)
    if not self.backupdb:
        # get_backupdb already explained the failure on stderr
        print >> stderr, "ERROR: Unable to load backup db."
        return 1
    try:
        # split "alias:path" destination into a root cap plus sub-path
        rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS)
    except UnknownAliasError, e:
        e.display(stderr)
        return 1
def create_or_skip(self, dbfile):
    """Open (creating if necessary) a backupdb at *dbfile*.

    Raises SkipTest when the failure was a missing python sqlite
    library; otherwise returns get_backupdb()'s result, which may be
    falsy so callers' assertions fail loudly.
    """
    stderr = StringIO()
    bdb = backupdb.get_backupdb(dbfile, stderr=stderr)
    if not bdb:
        # only skip when sqlite itself is unavailable
        if "I was unable to import a python sqlite library" in stderr.getvalue():
            raise unittest.SkipTest("sqlite unavailable, skipping test")
    return bdb
def _reset_last_checked(res):
    # Zero the last_checked timestamps stored in the backupdb so the
    # next backup pass is forced to re-check every file and directory.
    path = os.path.join(self.get_clientdir(), "private", "backupdb.sqlite")
    self.failUnless(os.path.exists(path), path)
    db = backupdb.get_backupdb(path)
    db.cursor.execute("UPDATE last_upload SET last_checked=0")
    db.cursor.execute("UPDATE directories SET last_checked=0")
    db.connection.commit()
def test_upgrade_v1_v2(self):
    """A schema-v1 backupdb on disk must be upgraded to v2 when reopened."""
    basedir = os.path.join("backupdb", "upgrade_v1_v2")
    self.basedir = basedir
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")

    # write a v1-schema database to disk without opening it for use
    stderr = StringIO()
    created = backupdb.get_backupdb(dbfile, stderr=stderr,
                                    create_version=(backupdb.SCHEMA_v1, 1),
                                    just_create=True)
    self.failUnless(created, "unable to create v1 backupdb")

    # reopening the v1 database should upgrade its schema in place
    bdb = self.create(dbfile)
    self.failUnlessEqual(bdb.VERSION, 2)
def test_upgrade_v1_v2(self):
    """Creating a schema-v1 db and reopening it must upgrade it to v2."""
    basedir = os.path.join("backupdb", "upgrade_v1_v2")
    self.basedir = basedir
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")

    stderr = StringIO()
    created = backupdb.get_backupdb(dbfile, stderr=stderr,
                                    create_version=(backupdb.SCHEMA_v1, 1),
                                    just_create=True)
    if not created:
        # distinguish "no sqlite library at all" (skip) from a real failure
        if "I was unable to import a python sqlite library" in stderr.getvalue():
            raise unittest.SkipTest("sqlite unavailable, skipping test")
        self.fail("unable to create v1 backupdb")

    # now we should have a v1 database on disk; reopening it should
    # upgrade the schema in place
    bdb = self.create_or_skip(dbfile)
    self.failUnless(bdb)
    self.failUnlessEqual(bdb.VERSION, 2)
def test_wrong_version(self):
    """A backupdb whose stored schema version is unknown must be rejected."""
    basedir = os.path.join("backupdb", "wrong_version")
    self.basedir = basedir
    fileutil.make_dirs(basedir)
    where = os.path.join(basedir, "tooold.db")
    bdb = self.create_or_skip(where)

    # reach into the DB and make it old
    bdb.cursor.execute("UPDATE version SET version=0")
    bdb.connection.commit()

    # now the next time we open the database, it should be an unusable
    # version
    captured = StringIO()
    bdb = backupdb.get_backupdb(where, captured)
    self.failUnlessEqual(bdb, None)
    self.failUnlessEqual(captured.getvalue().strip(),
                         "Unable to handle backupdb version 0")
def test_wrong_version(self):
    """Opening a backupdb with an unknown schema version must fail.

    Creates a valid db, rewrites its version row to 0, and verifies
    that reopening returns None with a specific diagnostic on stderr.
    """
    self.basedir = basedir = os.path.join("backupdb", "wrong_version")
    fileutil.make_dirs(basedir)
    where = os.path.join(basedir, "tooold.db")
    bdb = self.create(where)
    # reach into the DB and make it old
    bdb.cursor.execute("UPDATE version SET version=0")
    bdb.connection.commit()

    # now the next time we open the database, it should be an unusable
    # version
    stderr_f = StringIO()
    bdb = backupdb.get_backupdb(where, stderr_f)
    self.failUnlessEqual(bdb, None)
    stderr = stderr_f.getvalue()
    self.failUnlessEqual(stderr.strip(),
                         "Unable to handle backupdb version 0")
def test_upgrade_v1_v2(self):
    """A schema-v1 backupdb on disk must be upgraded to v2 when reopened.

    Skips when no python sqlite library is importable; fails on any
    other creation error.
    """
    self.basedir = basedir = os.path.join("backupdb", "upgrade_v1_v2")
    fileutil.make_dirs(basedir)
    dbfile = os.path.join(basedir, "dbfile")
    stderr = StringIO()
    # write a v1-schema database without opening it for use
    created = backupdb.get_backupdb(dbfile, stderr=stderr,
                                    create_version=(backupdb.SCHEMA_v1, 1),
                                    just_create=True)
    if not created:
        # only skip when sqlite itself is unavailable
        if "I was unable to import a python sqlite library" in stderr.getvalue():
            raise unittest.SkipTest("sqlite unavailable, skipping test")
        self.fail("unable to create v1 backupdb")
    # now we should have a v1 database on disk
    bdb = self.create_or_skip(dbfile)
    self.failUnless(bdb)
    self.failUnlessEqual(bdb.VERSION, 2)
def create(self, dbfile):
    """Open (creating if necessary) a backupdb at *dbfile* and fail the
    test immediately if that is impossible."""
    err = StringIO()
    db = backupdb.get_backupdb(dbfile, stderr=err)
    self.failUnless(db, "unable to create backupdb from %r" % (dbfile,))
    return db
def run(self):
    """Entry point for the 'tahoe backup' CLI command.

    Opens the node-private backupdb, ensures the destination directory
    and its Archives/ child exist, backs up the source tree, then links
    the new snapshot under Archives/<timestamp> and Latest.

    Returns 0 on success, 1 on setup/HTTP failure, 2 if any files or
    directories were skipped.
    """
    options = self.options
    nodeurl = options['node-url']
    # verbosity: 0 == --quiet, 1 == default, 2 == --verbose
    self.verbosity = 1
    if options['quiet']:
        self.verbosity = 0
    if options['verbose']:
        self.verbosity = 2
    stdout = options.stdout
    stderr = options.stderr
    start_timestamp = datetime.datetime.now()
    # the backupdb lives in the node's private/ directory
    bdbfile = os.path.join(options["node-directory"],
                           "private", "backupdb.sqlite")
    bdbfile = abspath_expanduser_unicode(bdbfile)
    self.backupdb = backupdb.get_backupdb(bdbfile, stderr)
    if not self.backupdb:
        # get_backupdb already wrote the detailed reason to stderr
        print("ERROR: Unable to load backup db.", file=stderr)
        return 1
    try:
        # split "alias:path" destination into a root cap plus sub-path
        rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS)
    except UnknownAliasError as e:
        e.display(stderr)
        return 1
    to_url = nodeurl + "uri/%s/" % url_quote(rootcap)
    if path:
        to_url += escape_path(path)
    if not to_url.endswith("/"):
        to_url += "/"
    archives_url = to_url + "Archives/"
    # normalize: the web-API URLs below are built with explicit "/" joins
    archives_url = archives_url.rstrip("/")
    to_url = to_url.rstrip("/")

    # first step: make sure the target directory exists, as well as the
    # Archives/ subdirectory.
    resp = do_http("GET", archives_url + "?t=json")
    if resp.status == 404:
        resp = do_http("POST", archives_url + "?t=mkdir")
        if resp.status != 200:
            print(format_http_error("Unable to create target directory",
                                    resp), file=stderr)
            return 1

    # second step: process the tree
    targets = list(
        collect_backup_targets(
            options.from_dir,
            listdir_unicode,
            self.options.filter_listdir,
        ))
    completed = run_backup(
        warn=self.warn,
        upload_file=self.upload,
        upload_directory=self.upload_directory,
        targets=targets,
        start_timestamp=start_timestamp,
        stdout=stdout,
    )
    new_backup_dircap = completed.dircap

    # third: attach the new backup to the list
    now = time_format.iso_utc(int(time.time()), sep="_") + "Z"
    put_child(archives_url, now, new_backup_dircap)
    put_child(to_url, "Latest", new_backup_dircap)
    print(completed.report(
        self.verbosity,
        self._files_checked,
        self._directories_checked,
    ), file=stdout)

    # The command exits with code 2 if files or directories were skipped
    if completed.any_skips():
        return 2
    # done!
    return 0
def test_backup(self):
    """End-to-end exercise of 'tahoe backup' (Python 2 revision).

    Performs an initial backup, verifies re-running reuses everything,
    forces a full re-check by resetting backupdb timestamps, then
    mutates the source tree (file<->directory swaps) and verifies only
    the changed entries are re-uploaded and old snapshots stay intact.
    """
    self.basedir = "cli/Backup/backup"
    self.set_up_grid()

    # is the backupdb available? If so, we test that a second backup does
    # not create new directories.
    hush = StringIO()
    bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
                                hush)
    self.failUnless(bdb)

    # create a small local directory with a couple of files
    source = os.path.join(self.basedir, "home")
    fileutil.make_dirs(os.path.join(source, "empty"))
    self.writeto("parent/subdir/foo.txt", "foo")
    self.writeto("parent/subdir/bar.txt", "bar\n" * 1000)
    self.writeto("parent/blah.txt", "blah")

    def do_backup(verbose=False):
        # run "tahoe backup [--verbose] <source> tahoe:backups"
        cmd = ["backup"]
        if verbose:
            cmd.append("--verbose")
        cmd.append(source)
        cmd.append("tahoe:backups")
        return self.do_cli(*cmd)

    d = self.do_cli("create-alias", "tahoe")

    d.addCallback(lambda res: do_backup())
    def _check0((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # foo.txt, bar.txt, blah.txt
        self.failUnlessReallyEqual(fu, 3)
        self.failUnlessReallyEqual(fr, 0)
        self.failUnlessReallyEqual(fs, 0)
        # empty, home, home/parent, home/parent/subdir
        self.failUnlessReallyEqual(dc, 4)
        self.failUnlessReallyEqual(dr, 0)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check0)

    d.addCallback(lambda res: self.do_cli("ls", "--uri", "tahoe:backups"))
    def _check1((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        lines = out.split("\n")
        children = dict([line.split() for line in lines if line])
        latest_uri = children["Latest"]
        self.failUnless(latest_uri.startswith("URI:DIR2-CHK:"), latest_uri)
        childnames = children.keys()
        self.failUnlessReallyEqual(sorted(childnames),
                                   ["Archives", "Latest"])
    d.addCallback(_check1)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest"))
    def _check2((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(sorted(out.split()),
                                   ["empty", "parent"])
    d.addCallback(_check2)

    d.addCallback(lambda res: self.do_cli("ls",
                                          "tahoe:backups/Latest/empty"))
    def _check2a((rc, out, err)):
        # the empty directory must have been backed up, and stay empty
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out.strip(), "")
    d.addCallback(_check2a)

    d.addCallback(lambda res: self.do_cli("get",
                                          "tahoe:backups/Latest/parent/subdir/foo.txt"))
    def _check3((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "foo")
    d.addCallback(_check3)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check4((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.old_archives = out.split()
        self.failUnlessReallyEqual(len(self.old_archives), 1)
    d.addCallback(_check4)

    # stall so the second backup gets a distinct Archives/ timestamp
    d.addCallback(self.stall, 1.1)
    d.addCallback(lambda res: do_backup())
    def _check4a((rc, out, err)):
        # second backup should reuse everything, if the backupdb is
        # available
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # foo.txt, bar.txt, blah.txt
        self.failUnlessReallyEqual(fu, 0)
        self.failUnlessReallyEqual(fr, 3)
        self.failUnlessReallyEqual(fs, 0)
        # empty, home, home/parent, home/parent/subdir
        self.failUnlessReallyEqual(dc, 0)
        self.failUnlessReallyEqual(dr, 4)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check4a)

    # sneak into the backupdb, crank back the "last checked"
    # timestamp to force a check on all files
    def _reset_last_checked(res):
        dbfile = os.path.join(self.get_clientdir(),
                              "private", "backupdb.sqlite")
        self.failUnless(os.path.exists(dbfile), dbfile)
        bdb = backupdb.get_backupdb(dbfile)
        bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
        bdb.cursor.execute("UPDATE directories SET last_checked=0")
        bdb.connection.commit()
    d.addCallback(_reset_last_checked)

    d.addCallback(self.stall, 1.1)
    d.addCallback(lambda res: do_backup(verbose=True))
    def _check4b((rc, out, err)):
        # we should check all files, and re-use all of them. None of
        # the directories should have been changed, so we should
        # re-use all of them too.
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        fchecked, dchecked = self.count_output2(out)
        self.failUnlessReallyEqual(fchecked, 3)
        self.failUnlessReallyEqual(fu, 0)
        self.failUnlessReallyEqual(fr, 3)
        self.failUnlessReallyEqual(fs, 0)
        self.failUnlessReallyEqual(dchecked, 4)
        self.failUnlessReallyEqual(dc, 0)
        self.failUnlessReallyEqual(dr, 4)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check4b)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check5((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.new_archives = out.split()
        self.failUnlessReallyEqual(len(self.new_archives), 3, out)
        # the original backup should still be the oldest (i.e. sorts
        # alphabetically towards the beginning)
        self.failUnlessReallyEqual(sorted(self.new_archives)[0],
                                   self.old_archives[0])
    d.addCallback(_check5)

    d.addCallback(self.stall, 1.1)
    def _modify(res):
        self.writeto("parent/subdir/foo.txt", "FOOF!")
        # and turn a file into a directory
        os.unlink(os.path.join(source, "parent/blah.txt"))
        os.mkdir(os.path.join(source, "parent/blah.txt"))
        self.writeto("parent/blah.txt/surprise file", "surprise")
        self.writeto("parent/blah.txt/surprisedir/subfile", "surprise")
        # turn a directory into a file
        os.rmdir(os.path.join(source, "empty"))
        self.writeto("empty", "imagine nothing being here")
        return do_backup()
    d.addCallback(_modify)
    def _check5a((rc, out, err)):
        # second backup should reuse bar.txt (if backupdb is available),
        # and upload the rest. None of the directories can be reused.
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # new foo.txt, surprise file, subfile, empty
        self.failUnlessReallyEqual(fu, 4)
        # old bar.txt
        self.failUnlessReallyEqual(fr, 1)
        self.failUnlessReallyEqual(fs, 0)
        # home, parent, subdir, blah.txt, surprisedir
        self.failUnlessReallyEqual(dc, 5)
        self.failUnlessReallyEqual(dr, 0)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check5a)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check6((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.new_archives = out.split()
        self.failUnlessReallyEqual(len(self.new_archives), 4)
        self.failUnlessReallyEqual(sorted(self.new_archives)[0],
                                   self.old_archives[0])
    d.addCallback(_check6)

    d.addCallback(lambda res: self.do_cli("get",
                                          "tahoe:backups/Latest/parent/subdir/foo.txt"))
    def _check7((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "FOOF!")
        # the old snapshot should not be modified
        return self.do_cli("get",
                           "tahoe:backups/Archives/%s/parent/subdir/foo.txt" % self.old_archives[0])
    d.addCallback(_check7)
    def _check8((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "foo")
    d.addCallback(_check8)
    return d
def create(self, dbfile):
    """Open (creating if necessary) a backupdb at *dbfile*, failing the
    test if that is impossible."""
    stderr = StringIO()
    bdb = backupdb.get_backupdb(dbfile, stderr=stderr)
    self.failUnless(bdb, "unable to create backupdb from %r" % (dbfile,))
    return bdb
def test_backup(self):
    """End-to-end exercise of 'tahoe backup' (oneshare-grid revision).

    Backs up a small tree, verifies the second run reuses everything,
    resets backupdb timestamps to force a full re-check, then mutates
    the source (file<->directory swaps) and verifies incremental
    behavior and immutability of old snapshots.
    """
    self.basedir = "cli/Backup/backup"
    self.set_up_grid(oneshare=True)

    # is the backupdb available? If so, we test that a second backup does
    # not create new directories.
    hush = StringIO()
    bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
                                hush)
    self.failUnless(bdb)

    # create a small local directory with a couple of files
    source = os.path.join(self.basedir, "home")
    fileutil.make_dirs(os.path.join(source, "empty"))
    self.writeto("parent/subdir/foo.txt", "foo")
    self.writeto("parent/subdir/bar.txt", "bar\n" * 1000)
    self.writeto("parent/blah.txt", "blah")

    def do_backup(verbose=False):
        # run "tahoe backup [--verbose] <source> tahoe:backups"
        cmd = ["backup"]
        if verbose:
            cmd.append("--verbose")
        cmd.append(source)
        cmd.append("tahoe:backups")
        return self.do_cli(*cmd)

    d = self.do_cli("create-alias", "tahoe")

    d.addCallback(lambda res: do_backup())
    def _check0((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # foo.txt, bar.txt, blah.txt
        self.failUnlessReallyEqual(fu, 3)
        self.failUnlessReallyEqual(fr, 0)
        self.failUnlessReallyEqual(fs, 0)
        # empty, home, home/parent, home/parent/subdir
        self.failUnlessReallyEqual(dc, 4)
        self.failUnlessReallyEqual(dr, 0)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check0)

    d.addCallback(lambda res: self.do_cli("ls", "--uri", "tahoe:backups"))
    def _check1((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        lines = out.split("\n")
        children = dict([line.split() for line in lines if line])
        latest_uri = children["Latest"]
        self.failUnless(latest_uri.startswith("URI:DIR2-CHK:"), latest_uri)
        childnames = children.keys()
        self.failUnlessReallyEqual(sorted(childnames),
                                   ["Archives", "Latest"])
    d.addCallback(_check1)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest"))
    def _check2((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(sorted(out.split()),
                                   ["empty", "parent"])
    d.addCallback(_check2)

    d.addCallback(
        lambda res: self.do_cli("ls", "tahoe:backups/Latest/empty"))
    def _check2a((rc, out, err)):
        # the empty directory must have been backed up, and stay empty
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out.strip(), "")
    d.addCallback(_check2a)

    d.addCallback(lambda res: self.do_cli(
        "get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
    def _check3((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "foo")
    d.addCallback(_check3)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check4((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.old_archives = out.split()
        self.failUnlessReallyEqual(len(self.old_archives), 1)
    d.addCallback(_check4)

    # stall so the second backup gets a distinct Archives/ timestamp
    d.addCallback(self.stall, 1.1)
    d.addCallback(lambda res: do_backup())
    def _check4a((rc, out, err)):
        # second backup should reuse everything, if the backupdb is
        # available
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # foo.txt, bar.txt, blah.txt
        self.failUnlessReallyEqual(fu, 0)
        self.failUnlessReallyEqual(fr, 3)
        self.failUnlessReallyEqual(fs, 0)
        # empty, home, home/parent, home/parent/subdir
        self.failUnlessReallyEqual(dc, 0)
        self.failUnlessReallyEqual(dr, 4)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check4a)

    # sneak into the backupdb, crank back the "last checked"
    # timestamp to force a check on all files
    def _reset_last_checked(res):
        dbfile = os.path.join(self.get_clientdir(),
                              "private", "backupdb.sqlite")
        self.failUnless(os.path.exists(dbfile), dbfile)
        bdb = backupdb.get_backupdb(dbfile)
        bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
        bdb.cursor.execute("UPDATE directories SET last_checked=0")
        bdb.connection.commit()
    d.addCallback(_reset_last_checked)

    d.addCallback(self.stall, 1.1)
    d.addCallback(lambda res: do_backup(verbose=True))
    def _check4b((rc, out, err)):
        # we should check all files, and re-use all of them. None of
        # the directories should have been changed, so we should
        # re-use all of them too.
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        fchecked, dchecked = self.count_output2(out)
        self.failUnlessReallyEqual(fchecked, 3)
        self.failUnlessReallyEqual(fu, 0)
        self.failUnlessReallyEqual(fr, 3)
        self.failUnlessReallyEqual(fs, 0)
        self.failUnlessReallyEqual(dchecked, 4)
        self.failUnlessReallyEqual(dc, 0)
        self.failUnlessReallyEqual(dr, 4)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check4b)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check5((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.new_archives = out.split()
        self.failUnlessReallyEqual(len(self.new_archives), 3, out)
        # the original backup should still be the oldest (i.e. sorts
        # alphabetically towards the beginning)
        self.failUnlessReallyEqual(
            sorted(self.new_archives)[0], self.old_archives[0])
    d.addCallback(_check5)

    d.addCallback(self.stall, 1.1)
    def _modify(res):
        self.writeto("parent/subdir/foo.txt", "FOOF!")
        # and turn a file into a directory
        os.unlink(os.path.join(source, "parent/blah.txt"))
        os.mkdir(os.path.join(source, "parent/blah.txt"))
        self.writeto("parent/blah.txt/surprise file", "surprise")
        self.writeto("parent/blah.txt/surprisedir/subfile", "surprise")
        # turn a directory into a file
        os.rmdir(os.path.join(source, "empty"))
        self.writeto("empty", "imagine nothing being here")
        return do_backup()
    d.addCallback(_modify)
    def _check5a((rc, out, err)):
        # second backup should reuse bar.txt (if backupdb is available),
        # and upload the rest. None of the directories can be reused.
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # new foo.txt, surprise file, subfile, empty
        self.failUnlessReallyEqual(fu, 4)
        # old bar.txt
        self.failUnlessReallyEqual(fr, 1)
        self.failUnlessReallyEqual(fs, 0)
        # home, parent, subdir, blah.txt, surprisedir
        self.failUnlessReallyEqual(dc, 5)
        self.failUnlessReallyEqual(dr, 0)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check5a)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check6((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.new_archives = out.split()
        self.failUnlessReallyEqual(len(self.new_archives), 4)
        self.failUnlessReallyEqual(
            sorted(self.new_archives)[0], self.old_archives[0])
    d.addCallback(_check6)

    d.addCallback(lambda res: self.do_cli(
        "get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
    def _check7((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "FOOF!")
        # the old snapshot should not be modified
        return self.do_cli(
            "get",
            "tahoe:backups/Archives/%s/parent/subdir/foo.txt" % self.old_archives[0])
    d.addCallback(_check7)
    def _check8((rc, out, err)):
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "foo")
    d.addCallback(_check8)
    return d
def run(self):
    """Entry point for the 'tahoe backup' CLI command (Python 2 revision).

    Opens the node-private backupdb, ensures the destination directory
    and its Archives/ child exist, backs up the source tree, then links
    the new snapshot under Archives/<timestamp> and Latest.

    Returns 0 on success, 1 on setup/HTTP failure, 2 if any files or
    directories were skipped.
    """
    options = self.options
    nodeurl = options['node-url']
    # verbosity: 0 == --quiet, 1 == default, 2 == --verbose
    self.verbosity = 1
    if options['quiet']:
        self.verbosity = 0
    if options['verbose']:
        self.verbosity = 2
    stdout = options.stdout
    stderr = options.stderr
    start_timestamp = datetime.datetime.now()
    # the backupdb lives in the node's private/ directory
    bdbfile = os.path.join(options["node-directory"],
                           "private", "backupdb.sqlite")
    bdbfile = abspath_expanduser_unicode(bdbfile)
    self.backupdb = backupdb.get_backupdb(bdbfile, stderr)
    if not self.backupdb:
        # get_backupdb already wrote the detailed reason to stderr
        print >>stderr, "ERROR: Unable to load backup db."
        return 1
    try:
        # split "alias:path" destination into a root cap plus sub-path
        rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS)
    except UnknownAliasError as e:
        e.display(stderr)
        return 1
    to_url = nodeurl + "uri/%s/" % urllib.quote(rootcap)
    if path:
        to_url += escape_path(path)
    if not to_url.endswith("/"):
        to_url += "/"
    archives_url = to_url + "Archives/"

    # first step: make sure the target directory exists, as well as the
    # Archives/ subdirectory.
    resp = do_http("GET", archives_url + "?t=json")
    if resp.status == 404:
        resp = do_http("POST", archives_url + "?t=mkdir")
        if resp.status != 200:
            print >>stderr, format_http_error("Unable to create target directory", resp)
            return 1

    # second step: process the tree
    targets = list(collect_backup_targets(
        options.from_dir,
        listdir_unicode,
        self.options.filter_listdir,
    ))
    completed = run_backup(
        warn=self.warn,
        upload_file=self.upload,
        upload_directory=self.upload_directory,
        targets=targets,
        start_timestamp=start_timestamp,
        stdout=stdout,
    )
    new_backup_dircap = completed.dircap

    # third: attach the new backup to the list
    now = time_format.iso_utc(int(time.time()), sep="_") + "Z"
    put_child(archives_url, now, new_backup_dircap)
    put_child(to_url, "Latest", new_backup_dircap)
    print >>stdout, completed.report(
        self.verbosity,
        self._files_checked,
        self._directories_checked,
    )

    # The command exits with code 2 if files or directories were skipped
    if completed.any_skips():
        return 2
    # done!
    return 0
def test_backup(self):
    """End-to-end exercise of 'tahoe backup' (current revision).

    In addition to the reuse/re-check/mutation scenario, the first
    (verbose) run also validates the progress reporting: monotonic
    file counts, constant totals, and non-decreasing elapsed time.
    """
    self.basedir = "cli/Backup/backup"
    self.set_up_grid(oneshare=True)

    # is the backupdb available? If so, we test that a second backup does
    # not create new directories.
    hush = StringIO()
    bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
                                hush)
    self.failUnless(bdb)

    # create a small local directory with a couple of files
    source = os.path.join(self.basedir, "home")
    fileutil.make_dirs(os.path.join(source, "empty"))
    self.writeto("parent/subdir/foo.txt", "foo")
    self.writeto("parent/subdir/bar.txt", "bar\n" * 1000)
    self.writeto("parent/blah.txt", "blah")

    def do_backup(verbose=False):
        # run "tahoe backup [--verbose] <source> tahoe:backups"
        cmd = ["backup"]
        if verbose:
            cmd.append("--verbose")
        cmd.append(source)
        cmd.append("tahoe:backups")
        return self.do_cli(*cmd)

    d = self.do_cli("create-alias", "tahoe")

    d.addCallback(lambda res: do_backup(True))
    def _check0(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        (
            files_uploaded,
            files_reused,
            files_skipped,
            directories_created,
            directories_reused,
            directories_skipped,
        ) = self.count_output(out)
        # foo.txt, bar.txt, blah.txt
        self.failUnlessReallyEqual(files_uploaded, 3)
        self.failUnlessReallyEqual(files_reused, 0)
        self.failUnlessReallyEqual(files_skipped, 0)
        # empty, home, home/parent, home/parent/subdir
        self.failUnlessReallyEqual(directories_created, 4)
        self.failUnlessReallyEqual(directories_reused, 0)
        self.failUnlessReallyEqual(directories_skipped, 0)

        # This is the first-upload scenario so there should have been
        # nothing to check.
        (files_checked, directories_checked) = self.count_output2(out)
        self.failUnlessReallyEqual(files_checked, 0)
        self.failUnlessReallyEqual(directories_checked, 0)

        progress = self.progress_output(out)
        for left, right in zip(progress[:-1], progress[1:]):
            # Progress as measured by file count should progress
            # monotonically.
            self.assertTrue(
                left[0] < right[0],
                "Failed: {} < {}".format(left[0], right[0]),
            )
            # Total work to do should remain the same.
            self.assertEqual(left[1], right[1])
            # Amount of elapsed time should only go up. Allow it to
            # remain the same to account for resolution of the report.
            self.assertTrue(
                left[2] <= right[2],
                "Failed: {} <= {}".format(left[2], right[2]),
            )
        for element in progress:
            # Can't have more progress than the total.
            self.assertTrue(
                element[0] <= element[1],
                "Failed: {} <= {}".format(element[0], element[1]),
            )
    d.addCallback(_check0)

    d.addCallback(lambda res: self.do_cli("ls", "--uri", "tahoe:backups"))
    def _check1(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        lines = out.split("\n")
        children = dict([line.split() for line in lines if line])
        latest_uri = children["Latest"]
        self.failUnless(latest_uri.startswith("URI:DIR2-CHK:"), latest_uri)
        childnames = children.keys()
        self.failUnlessReallyEqual(sorted(childnames),
                                   ["Archives", "Latest"])
    d.addCallback(_check1)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest"))
    def _check2(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(sorted(out.split()),
                                   ["empty", "parent"])
    d.addCallback(_check2)

    d.addCallback(
        lambda res: self.do_cli("ls", "tahoe:backups/Latest/empty"))
    def _check2a(args):
        # the empty directory must have been backed up, and stay empty
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out.strip(), "")
    d.addCallback(_check2a)

    d.addCallback(lambda res: self.do_cli(
        "get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
    def _check3(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "foo")
    d.addCallback(_check3)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check4(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.old_archives = out.split()
        self.failUnlessReallyEqual(len(self.old_archives), 1)
    d.addCallback(_check4)

    # stall so the second backup gets a distinct Archives/ timestamp
    d.addCallback(self.stall, 1.1)
    d.addCallback(lambda res: do_backup())
    def _check4a(args):
        # second backup should reuse everything, if the backupdb is
        # available
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # foo.txt, bar.txt, blah.txt
        self.failUnlessReallyEqual(fu, 0)
        self.failUnlessReallyEqual(fr, 3)
        self.failUnlessReallyEqual(fs, 0)
        # empty, home, home/parent, home/parent/subdir
        self.failUnlessReallyEqual(dc, 0)
        self.failUnlessReallyEqual(dr, 4)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check4a)

    # sneak into the backupdb, crank back the "last checked"
    # timestamp to force a check on all files
    def _reset_last_checked(res):
        dbfile = self.get_client_config().get_private_path(
            "backupdb.sqlite")
        self.failUnless(os.path.exists(dbfile), dbfile)
        bdb = backupdb.get_backupdb(dbfile)
        bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
        bdb.cursor.execute("UPDATE directories SET last_checked=0")
        bdb.connection.commit()
    d.addCallback(_reset_last_checked)

    d.addCallback(self.stall, 1.1)
    d.addCallback(lambda res: do_backup(verbose=True))
    def _check4b(args):
        # we should check all files, and re-use all of them. None of
        # the directories should have been changed, so we should
        # re-use all of them too.
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        fchecked, dchecked = self.count_output2(out)
        self.failUnlessReallyEqual(fchecked, 3)
        self.failUnlessReallyEqual(fu, 0)
        self.failUnlessReallyEqual(fr, 3)
        self.failUnlessReallyEqual(fs, 0)
        self.failUnlessReallyEqual(dchecked, 4)
        self.failUnlessReallyEqual(dc, 0)
        self.failUnlessReallyEqual(dr, 4)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check4b)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check5(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.new_archives = out.split()
        self.failUnlessReallyEqual(len(self.new_archives), 3, out)
        # the original backup should still be the oldest (i.e. sorts
        # alphabetically towards the beginning)
        self.failUnlessReallyEqual(
            sorted(self.new_archives)[0], self.old_archives[0])
    d.addCallback(_check5)

    d.addCallback(self.stall, 1.1)
    def _modify(res):
        self.writeto("parent/subdir/foo.txt", "FOOF!")
        # and turn a file into a directory
        os.unlink(os.path.join(source, "parent/blah.txt"))
        os.mkdir(os.path.join(source, "parent/blah.txt"))
        self.writeto("parent/blah.txt/surprise file", "surprise")
        self.writeto("parent/blah.txt/surprisedir/subfile", "surprise")
        # turn a directory into a file
        os.rmdir(os.path.join(source, "empty"))
        self.writeto("empty", "imagine nothing being here")
        return do_backup()
    d.addCallback(_modify)
    def _check5a(args):
        # second backup should reuse bar.txt (if backupdb is available),
        # and upload the rest. None of the directories can be reused.
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        fu, fr, fs, dc, dr, ds = self.count_output(out)
        # new foo.txt, surprise file, subfile, empty
        self.failUnlessReallyEqual(fu, 4)
        # old bar.txt
        self.failUnlessReallyEqual(fr, 1)
        self.failUnlessReallyEqual(fs, 0)
        # home, parent, subdir, blah.txt, surprisedir
        self.failUnlessReallyEqual(dc, 5)
        self.failUnlessReallyEqual(dr, 0)
        self.failUnlessReallyEqual(ds, 0)
    d.addCallback(_check5a)

    d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
    def _check6(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.new_archives = out.split()
        self.failUnlessReallyEqual(len(self.new_archives), 4)
        self.failUnlessReallyEqual(
            sorted(self.new_archives)[0], self.old_archives[0])
    d.addCallback(_check6)

    d.addCallback(lambda res: self.do_cli(
        "get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
    def _check7(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "FOOF!")
        # the old snapshot should not be modified
        return self.do_cli(
            "get",
            "tahoe:backups/Archives/%s/parent/subdir/foo.txt" % self.old_archives[0])
    d.addCallback(_check7)
    def _check8(args):
        (rc, out, err) = args
        self.failUnlessReallyEqual(err, "")
        self.failUnlessReallyEqual(rc, 0)
        self.failUnlessReallyEqual(out, "foo")
    d.addCallback(_check8)
    return d