def backup_restore_compare(self):
    # Run a full backup
    b = Run("testbackup", const.FullBackup, self.options)
    b.run()

    # Make sure we have ticked to another second since the start of the last backup.
    while datetime.now() - b.start_time < timedelta(seconds=1):
        time.sleep(0.01)

    # Attempt to restore every file
    r = Restore(self.restore_folder, [self.files_folder], datetime.now(), self.options)
    r.run()

    # Check that the restored folder and original folder are identical
    left = unicode(self.files_folder)
    right = unicode(os.path.join(self.restore_folder, self.files_folder[1:]))
    d = utils.dircmp(left, right)
    self.assertEqual(d.left_only, set())
    self.assertEqual(d.right_only, set())
    self.assertEqual(d.diff_files, set())
    self.assertTrue(len(d.same_files) > 0)

    # Check that all files are in the DB
    for folder, _, local_files in os.walk(self.files_folder):
        for file in local_files:
            path = os.path.join(folder, file)
            # This will raise an exception if it does not exist
            self.db.select_path(path, build=False)
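# The "tick to another second" busy-wait above is repeated in several of the
# tests below. A minimal sketch of a module-level helper that could replace
# those loops; the name wait_for_new_second is hypothetical and not part of
# the original suite.
def wait_for_new_second(start_time):
    """Block until at least one full second has elapsed since start_time."""
    while datetime.now() - start_time < timedelta(seconds=1):
        time.sleep(0.01)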
def testBadVerifyEncrypted(self):
    backup = self.config.backups[self.backup.name]
    backup.encrypt = True
    self.config.backups[backup.name] = backup

    # Run a full backup
    b = Run("testbackup", const.FullBackup, self.options)
    b.run()

    # Get the times
    runs = self.db.runs("testbackup")
    run = runs[0]

    # Get the location of the data file from the streamer
    streamer = StreamOut(None, self.store, b.backup_folder)
    datafile = os.path.join(self.store.root, streamer.get_path(0))

    # Now corrupt the data file a little
    size = os.path.getsize(datafile)
    with open(datafile, "r+b") as f:
        f.seek(size // 2, 0)
        f.write("X")

    v = Verify("testbackup", run.start_time)
    self.assertRaises(Exception, v.run)
def do_backup(options):
    if options.full:
        backup_type = const.FullBackup
    else:
        backup_type = const.IncrBackup

    for name in options.backupname:
        try:
            r = Run(name, backup_type, options)
            r.run()
        except Exception as e:
            print("Failed: " + str(e))
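# A sketch of the options object do_backup() expects, assuming the attribute
# names used elsewhere in this suite (options.full and options.backupname from
# do_backup itself, plus the flags set on BlankClass in run_cycle_test). The
# helper name make_options and the default values are illustrative only.
def make_options(names, full=True):
    options = BlankClass()
    options.full = full          # True -> const.FullBackup, False -> const.IncrBackup
    options.backupname = names   # list of backup names to run
    options.dry_run = False
    options.message = False
    options.email = False
    options.shutdown = False
    options.norecurse = False
    return options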
def testVerify(self):
    # Run a full backup
    b = Run("testbackup", const.FullBackup, self.options)
    b.run()

    # Get the times
    runs = self.db.runs("testbackup")
    run = runs[0]

    v = Verify("testbackup", run.start_time)
    self.assertTrue(v.run())
def testCheckFiles(self):
    self.backup.include_packages = True
    b = Run("testbackup", const.FullBackup, self.options)
    b.run()

    # Check that all the right files are there.
    runs = self.db.runs(self.backup.name)
    self.assertEqual(len(runs), 1)
    run = runs[0]
    folder = run.folder
    self.assertTrue(self.store.exists(os.path.join(folder, const.PackageFile + const.EncryptionSuffix)))
    self.assertTrue(self.store.exists(os.path.join(folder, const.LOFFile + const.EncryptionSuffix)))
    self.assertTrue(self.store.exists(os.path.join(folder, const.ConfigName + const.EncryptionSuffix)))
def testChanges(self):
    # Full backup
    # change a file
    # Incremental backup
    # Restore most recent; ensure you get the latest file
    # Restore to just prior to the incremental; ensure you get the earlier file

    # Run a full backup
    file = os.path.join(self.files_folder, "changer")
    restore_file = os.path.join(self.restore_folder, file[1:])

    # t=0 - file does not exist
    b = Run("testbackup", const.FullBackup, self.options)
    b.run()

    # Make sure we have ticked to another second since the start of the last backup.
    while datetime.now() - b.start_time < timedelta(seconds=1):
        time.sleep(0.01)

    # t=1 - file exists
    with open(file, "w") as f:
        f.write("1")
    b = Run("testbackup", const.IncrBackup, self.options)
    b.run()

    # Make sure we have ticked to another second since the start of the last backup.
    while datetime.now() - b.start_time < timedelta(seconds=1):
        time.sleep(0.01)

    # t=2 - file changed
    with open(file, "w") as f:
        f.write("2")
    b = Run("testbackup", const.IncrBackup, self.options)
    b.run()

    # Get the times
    runs = self.db.runs("testbackup")
    t0 = runs[0].start_time
    t1 = runs[1].start_time
    t2 = runs[2].start_time

    for t, exists, contents in [(t0, False, None), (t1, True, "1"), (t2, True, "2"), (None, True, "2")]:
        # Attempt to restore most recent of ALL files
        # This tests the default restore.
        r = Restore(self.restore_folder, [self.files_folder], t, self.options)
        r.run()

        if exists:
            with open(restore_file, "r") as f:
                self.assertEqual(f.read(), contents)
        else:
            self.assertFalse(os.path.exists(restore_file))

        # clean
        shutil.rmtree(self.restore_folder)
        utils.makedirs(self.restore_folder)
def testBadConfig(self):
    # Run a full backup
    b = Run("testbackup", const.FullBackup, self.options)
    b.run()

    # Get the times
    runs = self.db.runs("testbackup")
    run = runs[0]

    # Delete the config file
    configfile = os.path.join(run.folder, const.ConfigName)
    self.store.remove_file(configfile)

    v = Verify("testbackup", run.start_time)
    self.assertRaises(Exception, v.run)
def testAutoManagementOfStore1(self):
    # Run a set of backups that will overload the store.
    # The automanaged store should continue to archive old backups as required.
    # Store space reclamation happens across all backups (i.e. any run).
    # We should see older runs from the first backup disappear.
    max_size, dummy, dummy = self.store.limit_details()
    filesize = utils.du(self.backup.include_folders[0])

    # Let's make sure we are going to do enough backups that
    # the older ones will be removed.
    RunCount = (max_size // filesize) + 2
    last_start = None
    for cycle in xrange(RunCount):
        if last_start:
            # Make sure we have ticked to another second since the start of the last backup.
            while datetime.now() - last_start < timedelta(seconds=1):
                time.sleep(0.01)

        backup = Backup(self.backup.name + str(cycle))
        backup.include_folders = self.backup.include_folders
        backup.store = self.backup.store
        backup.notify_msg = False
        self.config.backups[backup.name] = backup

        # Run a full backup
        b = Run(backup.name, const.FullBackup, self.options)
        b.run()
        last_start = b.start_time

    # Assert that the store is still of an appropriate size
    size, used, avail = self.store.current_usage()
    self.assertTrue(avail >= 0)
    self.assertTrue(used <= max_size)

    # Confirm that's true on disk
    disksize = utils.du(self.store.root)
    self.assertTrue(disksize <= max_size)

    # Check that some runs have actually been deleted
    runs = self.db.runs(self.backup.name + "0")
    self.assertTrue(len(runs) == 0)
    runs = self.db.runs(self.backup.name + "1")
    self.assertTrue(len(runs) == 0)
def testAutoManagementOfStore2(self):
    # Run one backup multiple times to overload a store
    max_size, dummy, dummy = self.store.limit_details()
    filesize = utils.du(self.backup.include_folders[0])

    # Let's make sure we are going to do enough backups that
    # the older ones will be removed.
    RunCount = (max_size // filesize) + 2
    last_start = None
    for cycle in xrange(RunCount):
        if last_start:
            # Make sure we have ticked to another second since the start of the last backup.
            while datetime.now() - last_start < timedelta(seconds=1):
                time.sleep(0.01)

        # Run a full backup
        b = Run(self.backup.name, const.FullBackup, self.options)
        b.run()
        last_start = b.start_time

    # Assert that the store is still of an appropriate size
    size, used, avail = self.store.current_usage()
    self.assertTrue(avail >= 0)
    self.assertTrue(used <= max_size)

    # Confirm that's true on disk
    disksize = utils.du(self.store.root)
    self.assertTrue(disksize <= max_size)

    # Check that some runs have actually been deleted
    runs = self.db.runs(self.backup.name)
    self.assertTrue(len(runs) < RunCount)
def testLongRun(self):
    # Run a full backup
    b = Run(self.backup.name, const.FullBackup, self.options)
    b.run()

    for cycle in xrange(self.cycles):
        print(str(cycle) + "\r")
        time.sleep(1)

        # Change some files
        with open(os.path.join(self.files_folder, "incr"), "w") as f:
            f.write(os.urandom(100))
        with open(os.path.join(self.files_folder, str(cycle)), "w") as f:
            f.write(os.urandom(100))

        # Run an incr backup
        b = Run(self.backup.name, const.IncrBackup, self.options)
        b.run()

    # Attempt to restore every file
    r = Restore(self.restore_folder, [self.files_folder], datetime.now(), self.options)
    r.run()

    # Let's break it
    # os.remove(os.path.join(self.restore_folder, self.files_folder[1:], "1"))
    # with open(os.path.join(self.files_folder, "incr"), "w") as f:
    #     f.write("-1")
    # with open(os.path.join(self.restore_folder, self.files_folder[1:], "8"), "w") as f:
    #     f.write("-1")

    # Check that the restored folder and original folder are identical
    left = unicode(self.files_folder)
    right = unicode(os.path.join(self.restore_folder, self.files_folder[1:]))
    d = utils.dircmp(left, right)
    self.assertEqual(d.left_only, set())
    self.assertEqual(d.right_only, set())
    self.assertEqual(d.diff_files, set())
    self.assertTrue(len(d.same_files) > 0)

    # Check that all files are in the DB
    for folder, _, local_files in os.walk(self.files_folder):
        for file in local_files:
            path = os.path.join(folder, file)
            # This will raise an exception if it does not exist
            self.db.select_path(path, build=False)
def run_cycle_test(self):
    options = BlankClass()
    options.dry_run = False
    options.message = False
    options.email = False
    options.shutdown = False
    options.norecurse = False

    # Run a full backup
    b = Run("testbackup1", const.FullBackup, options)
    b.run()

    # Run a full backup
    b = Run("testbackup2", const.FullBackup, options)
    b.run()

    # Now restore three files, so that each store is covered.
    restore_file1 = os.path.join(self.files_folder, "dir1", "f2.mp3")
    dest_file1 = os.path.join(self.restore_folder, restore_file1[1:])
    restore_file2 = os.path.join(self.files_folder, "dir2", "f3.exe")
    dest_file2 = os.path.join(self.restore_folder, restore_file2[1:])
    restore_file3 = os.path.join(self.files_folder, "dir3", "f4.txt")
    dest_file3 = os.path.join(self.restore_folder, restore_file3[1:])

    r = Restore(self.restore_folder, [restore_file1, restore_file2, restore_file3], datetime.now(), options)
    r.run()

    for path in [dest_file1, dest_file2, dest_file3]:
        if not os.path.exists(path):
            raise Exception("File %s was not restored" % path)
        if open(path).read() != self.teststring1:
            raise Exception("Restored file contents incorrect %s" % path)
        os.remove(path)

    # Make sure the store is the right size
    for name in self.config.storage:
        store = self.config.storage[name].copy()
        size, used, avail = store.current_usage()
        log.debug("Store", store.name, "size", size, "used", used, "avail", avail)
        if store.auto_manage and used > size:
            raise Exception("Store %s has grown too large" % store.name)

    ###################### PART 2 ######################

    # Wait a little
    time.sleep(1.1)

    # Make a change to each file
    for path in [restore_file1, restore_file2, restore_file3]:
        with open(path, "w") as f:
            f.write(self.teststring2)

    # Wait a little
    time.sleep(1.1)

    # Run an incremental backup
    b = Run("testbackup1", const.IncrBackup, options)
    b.run()

    # Run an incremental backup
    b = Run("testbackup2", const.IncrBackup, options)
    b.run()

    time.sleep(1.1)

    r = Restore(self.restore_folder, [restore_file1, restore_file2, restore_file3], datetime.now(), options)
    r.run()

    for path in [dest_file1, dest_file2, dest_file3]:
        if not os.path.exists(path):
            raise Exception("File %s was not restored after INCR" % path)
        if open(path).read() != self.teststring2:
            raise Exception("Restored file contents incorrect after INCR %s" % path)

    # raise Exception("Test Failure")

    # Make sure the store is the right size
    for name in self.config.storage:
        store = self.config.storage[name].copy()
        size, used, avail = store.current_usage()
        log.debug("Store", store.name, "size", size, "used", used)
        if store.auto_manage and used > size:
            raise Exception("Store %s has grown too large" % store.name)

    time.sleep(1.1)

    # Change the files back
    for path in [restore_file1, restore_file2, restore_file3]:
        with open(path, "w") as f:
            f.write(self.teststring1)