def list_backups(ctx, filesystem):
    """ List backups. """
    config_path = ctx.obj['config_path']
    logger = ctx.obj['logger']
    # Silence log output so only the table is printed.
    logger.setLevel('CRITICAL')

    config = Config(config_path)

    if filesystem:
        job = config.jobs.get(filesystem)
        if job is None:
            print('Filesystem does not exist in config file.')
            sys.exit(1)
        jobs = {filesystem: job}
    else:
        jobs = config.jobs
        if jobs is None:
            print('No filesystems exist in config file.')
            sys.exit(1)

    for filesystem, job in jobs.items():
        print(f'{filesystem}:\n')
        print('{0:<16} {1:<16} {2:<5} {3:<14}'.format(
            'time', 'dependency', 'type', 'size (bytes)'))
        print('-' * 52)

        for b in job.backup_db.get_backups():
            # str() so that a missing value renders as 'None'; format specs
            # like `<16` reject NoneType directly.
            dependency = str(b.dependency)
            backup_size = str(b.backup_size)
            print(f'{b.backup_time:<16} {dependency:<16} '
                  f'{b.backup_type:<5} {backup_size:<14}')
        print('\n')
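# A minimal sketch of the table this command prints, assuming one full
# backup followed by an incremental that depends on it. The filesystem
# name, timestamps, and sizes below are hypothetical, not taken from a
# real run:
#
# tank/data:
#
# time             dependency       type  size (bytes)
# ----------------------------------------------------
# 20210425_020000  None             full  104857600
# 20210426_020000  20210425_020000  inc   1048576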
def setUp(self):
    # The S3 client can leave SSL sockets open between tests; silence the
    # resulting ResourceWarnings so they don't clutter the test output.
    warnings.filterwarnings("ignore",
                            category=ResourceWarning,
                            message="unclosed.*<ssl.SSLSocket.*>")
    config = Config('config.cfg')
    self.job = next(iter(config.jobs.values()))
    self.bucket = self.job.bucket
    self.filesystem = self.job.filesystem
def setUp(self):
    warnings.filterwarnings("ignore",
                            category=ResourceWarning,
                            message="unclosed.*<ssl.SSLSocket.*>")
    config = Config('config.cfg')
    self.job = next(iter(config.jobs.values()))
    self.filesystem = self.job.filesystem

    out = create_filesystem(self.job.filesystem)
    self.assertEqual(0, out.returncode, msg=out.stderr)
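# The fixtures above and below call a create_filesystem() helper that is
# not shown here. A minimal sketch of what it might look like, assuming it
# shells out to `zfs create` and returns the CompletedProcess so callers
# can assert on returncode and stderr (this reconstruction is hypothetical):
import subprocess

def create_filesystem(filesystem):
    """Create a ZFS file system; a sketch, not the project's actual helper."""
    return subprocess.run(['zfs', 'create', filesystem],
                          capture_output=True, text=True)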
def setUp(self):
    # Given
    config = Config('config.cfg')
    job = next(iter(config.jobs.values()))
    self.filesystem = job.filesystem
    self.snapshot_name = 'snap_1'
    self.test_file = f'/{self.filesystem}/test_file'
    self.test_data = str(list(range(100_000)))

    out = create_filesystem(self.filesystem)
    self.assertEqual(0, out.returncode, msg=out.stderr)

    with open(self.test_file, 'w') as f:
        f.write(self.test_data)
def setUp(self):
    warnings.filterwarnings("ignore",
                            category=ResourceWarning,
                            message="unclosed.*<ssl.SSLSocket.*>")
    self.runner = CliRunner()
    config = Config('config.cfg')
    self.job = next(iter(config.jobs.values()))
    self.bucket = self.job.bucket
    self.test_file = f'/{self.job.filesystem}/test_file'
    self.test_data = str(list(range(100_000)))

    out = create_filesystem(self.job.filesystem)
    self.assertEqual(0, out.returncode, msg=out.stderr)

    with open(self.test_file, 'w') as f:
        f.write(self.test_data)
def backup(ctx):
    """ Start backup job scheduler. """
    config_path = ctx.obj['config_path']
    logger = ctx.obj['logger']

    config = Config(config_path)
    # A single worker serializes jobs so backups never overlap;
    # misfire_grace_time=None lets a late job run no matter how delayed,
    # and coalesce=True collapses multiple missed runs into one.
    scheduler = BlockingScheduler(
        executors={'default': ThreadPoolExecutor(max_workers=1)},
        job_defaults={'misfire_grace_time': None})

    for job in config.jobs.values():
        logger.info(f'filesystem={job.filesystem} '
                    f'cron="{job.cron}" '
                    'msg="Adding job."')
        scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
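# add_job() splats job.cron into APScheduler's cron trigger, so the parsed
# config must map onto the trigger's keyword arguments. A hedged sketch of
# what such a mapping might look like (the schedule below, nightly at
# 02:00, is hypothetical):
example_cron = {'minute': '0', 'hour': '2', 'day': '*',
                'month': '*', 'day_of_week': '*'}
# scheduler.add_job(job.start, 'cron', **example_cron, coalesce=True)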
def restore(ctx, destination, filesystem, backup_time):
    """ Restore from backup.

    Defaults to most recent backup if backup-time is not specified.

    WARNING: If restoring to a file system that already exists, snapshots
    and data that were written after the backup will be destroyed. Set
    `destination` in order to restore to a new file system.
    """
    config_path = ctx.obj['config_path']

    config = Config(config_path)
    job = config.jobs.get(filesystem)
    if job is None:
        print('Filesystem does not exist.')
        sys.exit(1)

    job.restore(backup_time, destination)
    print('Restore successful.')
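# A minimal sketch of driving the restore command from a test, assuming the
# commands hang off a click group named `cli` (a hypothetical entry point)
# and that `restore` takes the filesystem as an argument and the destination
# as an option, as the signature above suggests:
from click.testing import CliRunner

def invoke_restore(cli, filesystem, destination=None):
    """Run `restore` through click's test runner; option names are assumptions."""
    runner = CliRunner()
    args = ['restore', filesystem]
    if destination:
        args += ['--destination', destination]
    return runner.invoke(cli, args)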