def _init_retention_policies(self):
    """
    Normalise and activate the retention policy configuration.

    Forces ``retention_policy_mode`` to 'auto' and ``wal_retention_policy``
    to 'main' (the only supported values), then replaces the string config
    values with RetentionPolicy objects built by RetentionPolicyFactory.
    On success, sets ``self.enforce_retention_policies`` to True; on an
    invalid ``retention_policy`` string, enforcement stays disabled.
    """
    # Set retention policy mode
    if self.config.retention_policy_mode != 'auto':
        _logger.warning(
            'Unsupported retention_policy_mode "%s" for server "%s" '
            '(fallback to "auto")' % (
                self.config.retention_policy_mode, self.config.name))
        self.config.retention_policy_mode = 'auto'
    # If retention_policy is present, enforce them
    if self.config.retention_policy:
        # Check wal_retention_policy
        if self.config.wal_retention_policy != 'main':
            _logger.warning(
                'Unsupported wal_retention_policy value "%s" '
                'for server "%s" (fallback to "main")' % (
                    self.config.wal_retention_policy, self.config.name))
            self.config.wal_retention_policy = 'main'
        # Create retention policy objects
        try:
            rp = RetentionPolicyFactory.create(
                self,
                'retention_policy',
                self.config.retention_policy)
            # Reassign the configuration value (we keep it in one place)
            self.config.retention_policy = rp
            _logger.debug('Retention policy for server %s: %s' % (
                self.config.name, self.config.retention_policy))
            try:
                rp = RetentionPolicyFactory.create(
                    self,
                    'wal_retention_policy',
                    self.config.wal_retention_policy)
                # Reassign the configuration value (we keep it in one place)
                self.config.wal_retention_policy = rp
                _logger.debug(
                    'WAL retention policy for server %s: %s' % (
                        self.config.name, self.config.wal_retention_policy))
            except ValueError:
                # Invalid WAL policy string: log with traceback and fall
                # back to the default 'main' WAL retention policy
                _logger.exception(
                    'Invalid wal_retention_policy setting "%s" '
                    'for server "%s" (fallback to "main")' % (
                        self.config.wal_retention_policy, self.config.name))
                rp = RetentionPolicyFactory.create(
                    self, 'wal_retention_policy', 'main')
                self.config.wal_retention_policy = rp

            # Enforcement is enabled only when retention_policy parsed
            self.enforce_retention_policies = True
        except ValueError:
            # Invalid retention_policy string: enforcement stays disabled
            _logger.exception(
                'Invalid retention_policy setting "%s" for server "%s"' % (
                    self.config.retention_policy, self.config.name))
def test_first_backup(self):
    """
    Verify first_backup returns the oldest available backup id for both
    RecoveryWindowRetentionPolicy and RedundancyRetentionPolicy.
    """
    server = build_mocked_server()
    for policy_spec, policy_cls in (
            ('RECOVERY WINDOW OF 4 WEEKS', RecoveryWindowRetentionPolicy),
            ('REDUNDANCY 2', RedundancyRetentionPolicy)):
        rp = RetentionPolicyFactory.create(
            server, 'retention_policy', policy_spec)
        assert isinstance(rp, policy_cls)
        # A single DONE backup is the only candidate; set
        # minimum_redundancy = 1 on the mocked server
        done_backup = build_test_backup_info(
            server=rp.server,
            backup_id='test1',
            end_time=datetime.now(tzlocal()))
        rp.server.get_available_backups.return_value = {
            "test_backup": done_backup
        }
        rp.server.config.minimum_redundancy = 1
        # first_backup must report our single backup
        assert rp.first_backup() == 'test_backup'
def _init_retention_policies(self):
    """
    Normalise and activate the retention policy configuration.

    Forces ``retention_policy_mode`` to 'auto' and ``wal_retention_policy``
    to 'main' (the only supported values), then replaces the string config
    values with RetentionPolicy objects built by RetentionPolicyFactory.
    On success, sets ``self.enforce_retention_policies`` to True.
    """
    # Set retention policy mode
    if self.config.retention_policy_mode != 'auto':
        _logger.warning(
            'Unsupported retention_policy_mode "%s" for server "%s" '
            '(fallback to "auto")' % (
                self.config.retention_policy_mode, self.config.name))
        self.config.retention_policy_mode = 'auto'
    # If retention_policy is present, enforce them
    if self.config.retention_policy:
        # Check wal_retention_policy
        if self.config.wal_retention_policy != 'main':
            _logger.warning(
                'Unsupported wal_retention_policy value "%s" '
                'for server "%s" (fallback to "main")' % (
                    self.config.wal_retention_policy, self.config.name))
            self.config.wal_retention_policy = 'main'
        # Create retention policy objects
        try:
            rp = RetentionPolicyFactory.create(
                self, 'retention_policy', self.config.retention_policy)
            # Reassign the configuration value (we keep it in one place)
            self.config.retention_policy = rp
            _logger.debug('Retention policy for server %s: %s' % (
                self.config.name, self.config.retention_policy))
            try:
                rp = RetentionPolicyFactory.create(
                    self, 'wal_retention_policy',
                    self.config.wal_retention_policy)
                # BUG FIX: the WAL policy object was stored on
                # self.wal_retention_policy while the rest of the code
                # (including the checks above) reads it from self.config.
                # Keep the configuration value in one place.
                self.config.wal_retention_policy = rp
                _logger.debug(
                    'WAL retention policy for server %s: %s' % (
                        self.config.name, self.config.wal_retention_policy))
            # BUG FIX: a bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; the factory signals parse failures with
            # ValueError, so catch only that. Use _logger.exception to
            # keep the traceback.
            except ValueError:
                _logger.exception(
                    'Invalid wal_retention_policy setting "%s" '
                    'for server "%s" (fallback to "main")' % (
                        self.config.wal_retention_policy, self.config.name))
                # BUG FIX: the old fallback built
                # SimpleWALRetentionPolicy(self.retention_policy, self)
                # but "self.retention_policy" does not exist (the policy
                # lives in self.config), so the handler itself raised
                # AttributeError. Build the default policy via the
                # factory instead.
                rp = RetentionPolicyFactory.create(
                    self, 'wal_retention_policy', 'main')
                self.config.wal_retention_policy = rp

            self.enforce_retention_policies = True
        except ValueError:
            _logger.exception(
                'Invalid retention_policy setting "%s" for server "%s"' % (
                    self.config.retention_policy, self.config.name))
def test_recovery_window_report(self, caplog):
    """
    Basic unit test of RecoveryWindowRetentionPolicy

    Given a mock simulating a Backup with status DONE and
    the end_date not over the point of recoverability,
    the report method of the RecoveryWindowRetentionPolicy class must mark
    it as valid
    """
    server = build_mocked_server()
    rp = RetentionPolicyFactory.create(
        server, 'retention_policy', 'RECOVERY WINDOW OF 4 WEEKS')
    assert isinstance(rp, RecoveryWindowRetentionPolicy)

    # Build a BackupInfo object with status to DONE
    backup_info = build_test_backup_info(
        server=rp.server,
        backup_id='test1',
        end_time=datetime.now(tzlocal()))
    backup_source = {'test_backup3': backup_info}
    # Add a obsolete backup
    backup_info.end_time = datetime.now(tzlocal()) - timedelta(weeks=5)
    backup_source['test_backup2'] = backup_info
    # Add a second obsolete backup
    backup_info.end_time = datetime.now(tzlocal()) - timedelta(weeks=6)
    backup_source['test_backup'] = backup_info
    rp.server.get_available_backups.return_value = backup_source
    # instruct the get_available_backups method to return a map with
    # our mock as result and minimum_redundancy = 1
    rp.server.config.minimum_redundancy = 1
    rp.server.config.name = "test"
    # execute retention policy report
    report = rp.report()
    # check that our mock is valid for the retention policy
    assert report == {'test_backup3': 'VALID',
                      'test_backup2': 'OBSOLETE',
                      'test_backup': 'OBSOLETE'}

    # Expect a ValueError if passed context is invalid
    with pytest.raises(ValueError):
        rp.report(context='invalid')
    # Set a new minimum_redundancy parameter, enforcing the usage of the
    # configuration parameter instead of the retention policy default
    rp.server.config.minimum_redundancy = 4
    # execute retention policy report
    rp.report()
    # Check for the warning inside the log
    caplog.set_level(logging.WARNING)
    log = caplog.text
    # BUG FIX: the old check was "assert log.find(warn)" — str.find
    # returns -1 (truthy!) when the text is absent, so the assertion could
    # never fail. caplog.text also embeds logger/location info between the
    # level and the message, so an exact-substring match on "WARNING <msg>"
    # is unreliable; use a regex like the other variant of this test does.
    warn = (r"WARNING .*Keeping obsolete backup test_backup2 for "
            r"server test \(older than .*\) due to minimum redundancy "
            r"requirements \(4\)")
    assert re.search(warn, log)
def test_first_backup(self, server):
    """first_backup must return the id of the oldest available backup."""
    policy = RetentionPolicyFactory.create(
        "retention_policy", "RECOVERY WINDOW OF 4 WEEKS", server
    )
    assert isinstance(policy, RecoveryWindowRetentionPolicy)
    # One DONE backup available and minimum_redundancy = 1
    done_backup = build_test_backup_info(
        server=server, backup_id="test0", end_time=datetime.now(tzlocal())
    )
    server.get_available_backups.return_value = {"test_backup": done_backup}
    server.config.minimum_redundancy = 1
    # The single backup is necessarily the first one
    assert policy.first_backup() == "test_backup"

    # Repeat the check with a redundancy-based policy
    policy = RetentionPolicyFactory.create(
        "retention_policy", "REDUNDANCY 2", server=server
    )
    assert isinstance(policy, RedundancyRetentionPolicy)
    done_backup = build_test_backup_info(
        server=server, backup_id="test1", end_time=datetime.now(tzlocal())
    )
    server.get_available_backups.return_value = {"test_backup": done_backup}
    server.config.minimum_redundancy = 1
    assert policy.first_backup() == "test_backup"
def test_recovery_window_report(self, caplog):
    """
    Basic unit test of RecoveryWindowRetentionPolicy

    Given a mock simulating a Backup with status DONE and
    the end_date not over the point of recoverability, the report
    method of the RecoveryWindowRetentionPolicy class must mark
    it as valid
    """
    server = build_mocked_server()
    rp = RetentionPolicyFactory.create(server,
                                       'retention_policy',
                                       'RECOVERY WINDOW OF 4 WEEKS')
    assert isinstance(rp, RecoveryWindowRetentionPolicy)

    # Build a BackupInfo object with status to DONE
    backup_info = build_test_backup_info(server=rp.server,
                                         backup_id='test1',
                                         end_time=datetime.now(tzlocal()))
    backup_source = {'test_backup3': backup_info}
    # Add a obsolete backup
    backup_info.end_time = datetime.now(tzlocal()) - timedelta(weeks=5)
    backup_source['test_backup2'] = backup_info
    # Add a second obsolete backup
    backup_info.end_time = datetime.now(tzlocal()) - timedelta(weeks=6)
    backup_source['test_backup'] = backup_info
    rp.server.get_available_backups.return_value = backup_source
    # instruct the get_available_backups method to return a map with
    # our mock as result and minimum_redundancy = 1
    rp.server.config.minimum_redundancy = 1
    rp.server.config.name = "test"
    # execute retention policy report
    report = rp.report()
    # check that our mock is valid for the retention policy
    assert report == {
        'test_backup3': 'VALID',
        'test_backup2': 'OBSOLETE',
        'test_backup': 'OBSOLETE'
    }

    # Expect a ValueError if passed context is invalid
    with pytest.raises(ValueError):
        rp.report(context='invalid')
    # Set a new minimum_redundancy parameter, enforcing the usage of the
    # configuration parameter instead of the retention policy default
    rp.server.config.minimum_redundancy = 4
    # execute retention policy report
    rp.report()
    # Check for the warning inside the log
    caplog.set_level(logging.WARNING)
    log = caplog.text
    # BUG FIX: "assert log.find(warn)" was vacuous — str.find returns -1
    # (truthy) when the text is absent. caplog.text also embeds location
    # info between the level and the message, so match with a regex
    # instead of an exact substring.
    warn = (r"WARNING .*Keeping obsolete backup test_backup2 for "
            r"server test \(older than .*\) due to minimum redundancy "
            r"requirements \(4\)")
    assert re.search(warn, log)
def build_redundancy_retention():
    """
    Build RedundancyRetentionPolicy with redundancy 2

    :return RedundancyRetentionPolicy: a RedundancyRetentionPolicy instance
    """
    # A mocked server is enough: the factory only needs an object to
    # attach the policy to
    mock_server = Mock(name='server')
    return RetentionPolicyFactory.create(
        mock_server, 'retention_policy', 'REDUNDANCY 2')
def build_recovery_window_retention():
    """
    Build RecoveryWindowRetentionPolicy with recovery window of 4 weeks

    :return RecoveryWindowRetentionPolicy: a RecoveryWindowRetentionPolicy
        instance
    """
    # A mocked server is enough: the factory only needs an object to
    # attach the policy to
    mock_server = Mock(name='server')
    return RetentionPolicyFactory.create(
        mock_server, 'retention_policy', 'RECOVERY WINDOW OF 4 WEEKS')
def test_redundancy_report(self, server, caplog):
    """
    Test of the management of the minimum_redundancy parameter
    into the backup_report method of the RedundancyRetentionPolicy class
    """
    rp = RetentionPolicyFactory.create(
        "retention_policy", "REDUNDANCY 2", server=server
    )
    assert isinstance(rp, RedundancyRetentionPolicy)

    # Build a BackupInfo object with status to DONE
    backup_info = build_test_backup_info(
        server=server, backup_id="test1", end_time=datetime.now(tzlocal())
    )
    # instruct the get_available_backups method to return a map with
    # our mock as result and minimum_redundancy = 1
    server.get_available_backups.return_value = {
        "test_backup": backup_info,
        "test_backup2": backup_info,
        "test_backup3": backup_info,
    }
    server.config.minimum_redundancy = 1
    # execute retention policy report
    report = rp.report()
    # check that our mock is valid for the retention policy because
    # the total number of valid backups is lower than the retention policy
    # redundancy.
    assert report == {
        "test_backup": BackupInfo.OBSOLETE,
        "test_backup2": BackupInfo.VALID,
        "test_backup3": BackupInfo.VALID,
    }

    # Expect a ValueError if passed context is invalid
    with pytest.raises(ValueError):
        rp.report(context="invalid")
    # Set a new minimum_redundancy parameter, enforcing the usage of the
    # configuration parameter instead of the retention policy default
    server.config.minimum_redundancy = 3
    # execute retention policy report
    rp.report()
    # Check for the warning inside the log
    caplog.set_level(logging.WARNING)
    log = caplog.text
    # BUG FIX: "assert log.find(...)" asserted nothing — str.find returns
    # -1 (truthy) when absent and 0 (falsy) when the match starts the
    # string. Assert membership of the message itself; the "WARNING "
    # prefix is dropped because caplog.text embeds location info between
    # the level and the message.
    assert (
        "Retention policy redundancy (2) "
        "is lower than the required minimum redundancy (3). "
        "Enforce 3."
    ) in log
def test_keep_unknown_recovery_target(self, mock_server, mock_backup_manager):
    """Verify backups with an unrecognized keep target default to KEEP_FULL"""
    mock_server.backup_manager = mock_backup_manager
    policy = RetentionPolicyFactory.create(
        "retention_policy", "REDUNDANCY 2", server=mock_server
    )
    # Annotate the oldest backup with a keep target the policy code
    # does not recognise
    self.keep_targets = {"test_backup": "unsupported_recovery_target"}
    expected = {
        "test_backup": BackupInfo.KEEP_FULL,
        "test_backup2": BackupInfo.VALID,
        "test_backup3": BackupInfo.VALID,
    }
    # The unknown target must be treated as keep:full
    assert policy.report() == expected
def test_redundancy_report(self, caplog):
    """
    Test of the management of the minimum_redundancy parameter
    into the backup_report method of the RedundancyRetentionPolicy class
    """
    server = build_mocked_server()
    rp = RetentionPolicyFactory.create(
        server, 'retention_policy', 'REDUNDANCY 2')
    assert isinstance(rp, RedundancyRetentionPolicy)

    # Build a BackupInfo object with status to DONE
    backup_info = build_test_backup_info(
        server=rp.server,
        backup_id='test1',
        end_time=datetime.now(tzlocal()))
    # instruct the get_available_backups method to return a map with
    # our mock as result and minimum_redundancy = 1
    rp.server.get_available_backups.return_value = {
        "test_backup": backup_info,
        "test_backup2": backup_info,
        "test_backup3": backup_info,
    }
    rp.server.config.minimum_redundancy = 1
    # execute retention policy report
    report = rp.report()
    # check that our mock is valid for the retention policy because
    # the total number of valid backups is lower than the retention policy
    # redundancy.
    assert report == {'test_backup': BackupInfo.OBSOLETE,
                      'test_backup2': BackupInfo.VALID,
                      'test_backup3': BackupInfo.VALID}

    # Expect a ValueError if passed context is invalid
    with pytest.raises(ValueError):
        rp.report(context='invalid')
    # Set a new minimum_redundancy parameter, enforcing the usage of the
    # configuration parameter instead of the retention policy default
    rp.server.config.minimum_redundancy = 3
    # execute retention policy report
    rp.report()
    # Check for the warning inside the log
    caplog.set_level(logging.WARNING)
    log = caplog.text
    # BUG FIX: "assert log.find(...)" asserted nothing — str.find returns
    # -1 (truthy) when absent and 0 (falsy) when the match starts the
    # string. Assert membership of the message itself; the "WARNING "
    # prefix is dropped because caplog.text embeds location info between
    # the level and the message.
    assert ("Retention policy redundancy (2) "
            "is lower than the required minimum redundancy (3). "
            "Enforce 3.") in log
def test_keep_full_out_of_policy(self, mock_server, mock_backup_manager):
    """
    Test that a keep:full backup out-of-policy is reported as KEEP_FULL.
    """
    mock_server.backup_manager = mock_backup_manager
    policy = RetentionPolicyFactory.create(
        "retention_policy", "REDUNDANCY 2", server=mock_server
    )
    # The oldest backup (out of policy) carries a keep:full annotation
    self.keep_targets = {"test_backup": KeepManager.TARGET_FULL}
    expected = {
        "test_backup": BackupInfo.KEEP_FULL,
        "test_backup2": BackupInfo.VALID,
        "test_backup3": BackupInfo.VALID,
    }
    assert policy.report() == expected
def test_keep_full_within_policy(self, mock_server, mock_backup_manager):
    """
    Test that a keep:full backup within policy is reported as KEEP_FULL.
    """
    mock_server.backup_manager = mock_backup_manager
    policy = RetentionPolicyFactory.create(
        "retention_policy", "RECOVERY WINDOW OF 4 WEEKS", server=mock_server
    )
    # The newest backup (within the window) carries a keep:full annotation
    self.keep_targets = {"test_backup4": KeepManager.TARGET_FULL}
    expected = {
        "test_backup": BackupInfo.OBSOLETE,
        "test_backup2": BackupInfo.POTENTIALLY_OBSOLETE,
        "test_backup3": BackupInfo.VALID,
        "test_backup4": BackupInfo.KEEP_FULL,
    }
    assert policy.report() == expected
def test_keep_standalone_within_policy(self, mock_server, mock_backup_manager):
    """
    Test that a keep:standalone backup within policy is reported as
    KEEP_STANDALONE.
    """
    mock_server.backup_manager = mock_backup_manager
    policy = RetentionPolicyFactory.create(
        "retention_policy", "REDUNDANCY 2", server=mock_server
    )
    # A backup inside the redundancy window carries a keep:standalone
    # annotation
    self.keep_targets = {"test_backup3": KeepManager.TARGET_STANDALONE}
    expected = {
        "test_backup": BackupInfo.OBSOLETE,
        "test_backup2": BackupInfo.VALID,
        "test_backup3": BackupInfo.KEEP_STANDALONE,
    }
    assert policy.report() == expected
def test_keep_standalone_out_of_policy(self, mock_server, mock_backup_manager):
    """
    Test that a keep:standalone backup out-of-policy is reported as
    KEEP_STANDALONE.
    """
    mock_server.backup_manager = mock_backup_manager
    policy = RetentionPolicyFactory.create(
        "retention_policy", "RECOVERY WINDOW OF 4 WEEKS", server=mock_server
    )
    # The oldest backup (outside the window) carries a keep:standalone
    # annotation
    self.keep_targets = {"test_backup": KeepManager.TARGET_STANDALONE}
    expected = {
        "test_backup": BackupInfo.KEEP_STANDALONE,
        "test_backup2": BackupInfo.POTENTIALLY_OBSOLETE,
        "test_backup3": BackupInfo.VALID,
        "test_backup4": BackupInfo.VALID,
    }
    assert policy.report() == expected
def test_keep_full_minimum_redundancy(self, mock_server, mock_backup_manager):
    """
    Test that a keep:full backup which would normally be flagged as
    POTENTIALLY_OBSOLETE due to not meeting the minimum redundancy (3 in
    this case) is reported as KEEP_FULL.
    """
    mock_server.backup_manager = mock_backup_manager
    policy = RetentionPolicyFactory.create(
        "retention_policy", "RECOVERY WINDOW OF 4 WEEKS", server=mock_server
    )
    # keep:full wins over the minimum-redundancy POTENTIALLY_OBSOLETE flag
    self.keep_targets = {"test_backup2": KeepManager.TARGET_FULL}
    expected = {
        "test_backup": BackupInfo.OBSOLETE,
        "test_backup2": BackupInfo.KEEP_FULL,
        "test_backup3": BackupInfo.VALID,
        "test_backup4": BackupInfo.VALID,
    }
    assert policy.report() == expected
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        cloud_interface = get_cloud_interface(config)
        # closing() guarantees the cloud interface is released even on
        # SystemExit
        with closing(cloud_interface):
            if not cloud_interface.test_connectivity():
                raise SystemExit(1)
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)
            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist", cloud_interface.bucket_name)
                raise SystemExit(1)
            catalog = CloudBackupCatalog(
                cloud_interface=cloud_interface, server_name=config.server_name
            )
            # Call catalog.get_backup_list now so we know we can read the whole catalog
            # (the results are cached so this does not result in extra calls to cloud
            # storage)
            catalog.get_backup_list()
            if len(catalog.unreadable_backups) > 0:
                # Refuse to delete anything if part of the catalog cannot
                # be read: a partial view could delete backups still
                # needed by unreadable entries
                logging.error(
                    "Cannot read the following backups: %s\n"
                    "Unsafe to proceed with deletion due to failure reading backup catalog"
                    % catalog.unreadable_backups
                )
                raise SystemExit(1)
            if config.backup_id:
                # Because we only care about one backup, skip the annotation cache
                # because it is only helpful when dealing with multiple backups
                if catalog.should_keep_backup(config.backup_id, use_cache=False):
                    logging.error(
                        "Skipping delete of backup %s for server %s "
                        "as it has a current keep request. If you really "
                        "want to delete this backup please remove the keep "
                        "and try again.",
                        config.backup_id,
                        config.server_name,
                    )
                    raise SystemExit(1)
                _delete_backup(
                    cloud_interface, catalog, config.backup_id, config.dry_run
                )
            elif config.retention_policy:
                retention_policy = RetentionPolicyFactory.create(
                    "retention_policy",
                    config.retention_policy,
                    server_name=config.server_name,
                    catalog=catalog,
                )
                # Sort to ensure that we delete the backups in ascending order, that is
                # from oldest to newest. This ensures that the relevant WALs will be cleaned
                # up after each backup is deleted.
                backups_to_delete = sorted(
                    [
                        backup_id
                        for backup_id, status in retention_policy.report().items()
                        if status == "OBSOLETE"
                    ]
                )
                for backup_id in backups_to_delete:
                    _delete_backup(
                        cloud_interface,
                        catalog,
                        backup_id,
                        config.dry_run,
                        skip_wal_cleanup_if_standalone=False,
                    )
    except Exception as exc:
        # Top-level boundary: log the failure (with details at DEBUG) and
        # exit non-zero
        logging.error("Barman cloud backup delete exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise SystemExit(1)
def test_recovery_window_report(self, server, caplog):
    """
    Basic unit test of RecoveryWindowRetentionPolicy

    Given a mock simulating a Backup with status DONE and
    the end_date not over the point of recoverability,
    the report method of the RecoveryWindowRetentionPolicy class must mark
    it as valid
    """
    rp = RetentionPolicyFactory.create(
        "retention_policy", "RECOVERY WINDOW OF 4 WEEKS", server=server
    )
    assert isinstance(rp, RecoveryWindowRetentionPolicy)

    # One recent backup plus two progressively older ones (5 and 6 weeks)
    backup_source = {
        backup_id: build_test_backup_info(
            server=server,
            backup_id=backup_id,
            end_time=datetime.now(tzlocal()) - age,
        )
        for backup_id, age in (
            ("test_backup3", timedelta()),
            ("test_backup2", timedelta(weeks=5)),
            ("test_backup", timedelta(weeks=6)),
        )
    }
    server.get_available_backups.return_value = backup_source
    # minimum_redundancy = 1 on the mocked server configuration
    server.config.minimum_redundancy = 1
    server.config.name = "test"
    # Run the retention policy report and verify each backup's status
    assert rp.report() == {
        "test_backup3": "VALID",
        "test_backup2": "VALID",
        "test_backup": "OBSOLETE",
    }

    # Expect a ValueError if passed context is invalid
    with pytest.raises(ValueError):
        rp.report(context="invalid")

    # Raising minimum_redundancy above the backup count forces the policy
    # to keep otherwise-obsolete backups and log a warning
    server.config.minimum_redundancy = 4
    rp.report()
    caplog.set_level(logging.WARNING)
    log = caplog.text
    warn = (
        r"WARNING .*Keeping obsolete backup test_backup for "
        r"server test \(older than .*\) due to minimum redundancy "
        r"requirements \(4\)\n"
    )
    assert re.search(warn, log)
def test_backup_status(self):
    """
    Basic unit test of method backup_status

    Given a mock simulating a Backup with status DONE and
    requesting the status through the backup_status method, the
    RetentionPolicy class must mark it as valid

    This method tests the validity of a backup using both
    RedundancyRetentionPolicy and RecoveryWindowRetentionPolicy
    """
    server = build_mocked_server()
    for policy_spec, policy_cls in (
            ('REDUNDANCY 2', RedundancyRetentionPolicy),
            ('RECOVERY WINDOW OF 4 WEEKS', RecoveryWindowRetentionPolicy)):
        rp = RetentionPolicyFactory.create(
            server, 'retention_policy', policy_spec)
        assert isinstance(rp, policy_cls)
        # A single DONE backup with minimum_redundancy = 1 on the
        # mocked server
        done_backup = build_test_backup_info(
            server=rp.server,
            backup_id='test1',
            end_time=datetime.now(tzlocal()))
        rp.server.get_available_backups.return_value = {
            "test_backup": done_backup
        }
        rp.server.config.minimum_redundancy = 1
        # The backup must be reported as valid
        assert rp.backup_status('test_backup') == 'VALID'
        # Force an invalid context for testing purposes: backup_status
        # must then return BackupInfo.NONE
        rp.context = 'invalid'
        assert rp.backup_status('test_backup') == BackupInfo.NONE
def test_backup_status(self):
    """
    Basic unit test of method backup_status

    Given a mock simulating a Backup with status DONE and
    requesting the status through the backup_status method, the
    RetentionPolicy class must mark it as valid

    This method tests the validity of a backup using both
    RedundancyRetentionPolicy and RecoveryWindowRetentionPolicy
    """
    server = build_mocked_server()

    # --- RedundancyRetentionPolicy ---
    redundancy_rp = RetentionPolicyFactory.create(
        server, 'retention_policy', 'REDUNDANCY 2')
    assert isinstance(redundancy_rp, RedundancyRetentionPolicy)
    # A single DONE backup, minimum_redundancy = 1
    done_backup = build_test_backup_info(
        server=redundancy_rp.server,
        backup_id='test1',
        end_time=datetime.now(tzlocal()))
    redundancy_rp.server.get_available_backups.return_value = {
        "test_backup": done_backup
    }
    redundancy_rp.server.config.minimum_redundancy = 1
    assert redundancy_rp.backup_status('test_backup') == 'VALID'
    # With a forced invalid context the status must be BackupInfo.NONE
    redundancy_rp.context = 'invalid'
    assert redundancy_rp.backup_status('test_backup') == BackupInfo.NONE

    # --- RecoveryWindowRetentionPolicy ---
    window_rp = RetentionPolicyFactory.create(
        server, 'retention_policy', 'RECOVERY WINDOW OF 4 WEEKS')
    assert isinstance(window_rp, RecoveryWindowRetentionPolicy)
    done_backup = build_test_backup_info(
        server=window_rp.server,
        backup_id='test1',
        end_time=datetime.now(tzlocal()))
    window_rp.server.get_available_backups.return_value = {
        "test_backup": done_backup
    }
    window_rp.server.config.minimum_redundancy = 1
    assert window_rp.backup_status("test_backup") == 'VALID'
    # Forced invalid context again yields BackupInfo.NONE
    window_rp.context = 'invalid'
    assert window_rp.backup_status('test_backup') == BackupInfo.NONE