def test_check_postgres(self, postgres_mock, capsys):
    """
    Test management of check_postgres view output

    :param postgres_mock: mock get_remote_status function
    :param capsys: retrieve output from console
    """
    postgres_mock.return_value = {"server_txt_version": None}
    # Create server
    server = build_real_server()
    # Case: no reply by PostgreSQL
    # Expect out: PostgreSQL: FAILED
    strategy = CheckOutputStrategy()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == "\tPostgreSQL: FAILED\n"
    # Case: correct configuration
    postgres_mock.return_value = {
        "current_xlog": None,
        "archive_command": "wal to archive",
        "pgespresso_installed": None,
        "server_txt_version": "PostgresSQL 9_4",
        "data_directory": "/usr/local/postgres",
        "archive_mode": "on",
        "wal_level": "replica",
    }
    # Expect out: all parameters: OK
    # Postgres version >= 9.0 - check wal_level
    server = build_real_server()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == "\tPostgreSQL: OK\n" "\twal_level: OK\n"
    # Postgres version < 9.0 - avoid wal_level check
    del postgres_mock.return_value["wal_level"]
    server = build_real_server()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == "\tPostgreSQL: OK\n"
    # Case: wal_level and archive_command values are not acceptable
    postgres_mock.return_value = {
        "current_xlog": None,
        "archive_command": None,
        "pgespresso_installed": None,
        "server_txt_version": "PostgresSQL 9_4",
        "data_directory": "/usr/local/postgres",
        "archive_mode": "on",
        "wal_level": "minimal",
    }
    # Expect out: some parameters: FAILED
    strategy = CheckOutputStrategy()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == (
        "\tPostgreSQL: OK\n"
        "\twal_level: FAILED (please set it to a higher level "
        "than 'minimal')\n"
    )
def test_current_xlog_info(self, is_in_recovery_mock, conn_mock):
    """
    Test correct select xlog_loc
    """
    # Build and configure a server using a mock
    server = build_real_server()
    cursor_mock = conn_mock.return_value.cursor.return_value
    timestamp = datetime.datetime(2016, 3, 30, 17, 4, 20, 271376)
    current_xlog_info = dict(
        location='0/35000528',
        file_name='000000010000000000000035',
        file_offset=1320,
        timestamp=timestamp,
    )
    cursor_mock.fetchone.return_value = current_xlog_info
    is_in_recovery_mock.return_value = False
    remote_loc = server.postgres.current_xlog_info
    assert remote_loc == current_xlog_info
    cursor_mock.execute.assert_called_once_with(
        'SELECT location, (pg_xlogfile_name_offset(location)).*, '
        'CURRENT_TIMESTAMP AS timestamp '
        'FROM pg_current_xlog_location() AS location')
    # Reset mock
    conn_mock.reset_mock()
    # Test error management
    cursor_mock.execute.side_effect = PostgresConnectionError
    assert server.postgres.current_xlog_info is None
    cursor_mock.execute.side_effect = psycopg2.ProgrammingError
    assert server.postgres.current_xlog_info is None
def test_stop_exclusive_backup(self, conn):
    """
    Basic test for the stop_exclusive_backup method

    :param conn: a mock that imitates a connection to PostgreSQL
    """
    # Build a server
    server = build_real_server()
    # Expect no errors on normal call
    assert server.postgres.stop_exclusive_backup()
    # Check the correct invocation of the execute method
    cursor_mock = conn.return_value.cursor.return_value
    cursor_mock.execute.assert_called_once_with(
        'SELECT location, '
        '(pg_xlogfile_name_offset(location)).*, '
        'now() AS timestamp '
        'FROM pg_stop_backup() AS location'
    )
    # Test 2: setup the mock to trigger an exception,
    # expect the method to return None
    conn.reset_mock()
    cursor_mock.execute.side_effect = psycopg2.Error
    # Check that the method returns None as result
    assert server.postgres.stop_exclusive_backup() is None
def test_get_streaming_remote_status(self, conn):
    """
    Simple test for the get_remote_status method
    """
    # Build a server
    server = build_real_server(
        main_conf={
            'streaming_archiver': True,
            'streaming_conninfo': 'dummy=param'})
    # Working streaming connection
    conn.return_value.server_version = 90300
    result = server.streaming.get_remote_status()
    assert result['streaming'] is True
    cursor_mock = conn.return_value.cursor.return_value
    cursor_mock.execute.assert_called_once_with("IDENTIFY_SYSTEM")
    # Working non-streaming connection
    conn.reset_mock()
    cursor_mock.execute.side_effect = psycopg2.ProgrammingError
    result = server.streaming.get_remote_status()
    assert result['streaming'] is False
    cursor_mock.execute.assert_called_once_with("IDENTIFY_SYSTEM")
    # Connection failed
    conn.reset_mock()
    conn.side_effect = PostgresConnectionError
    result = server.streaming.get_remote_status()
    assert result['streaming'] is None
def test_check_archive(self, tmpdir):
    """
    Test the check_archive method
    """
    # Setup temp dir and server
    server = build_real_server(
        global_conf={"barman_lock_directory": tmpdir.mkdir("lock").strpath},
        main_conf={"wals_directory": tmpdir.mkdir("wals").strpath},
    )
    strategy = CheckStrategy()
    # Call the check on a nonexistent xlog file. Expect it to fail
    server.check_archive(strategy)
    assert strategy.has_error is True
    assert strategy.check_result[0].check == "WAL archive"
    assert strategy.check_result[0].status is False
    # Call the check on an empty xlog file. Expect it to contain errors.
    with open(server.xlogdb_file_name, "a"):
        # the open call forces the file creation
        pass
    server.check_archive(strategy)
    assert strategy.has_error is True
    assert strategy.check_result[0].check == "WAL archive"
    assert strategy.check_result[0].status is False
    # Write something in the xlog db file and check for the results
    with server.xlogdb("w") as fxlogdb:
        fxlogdb.write("00000000000000000000")
    # The check strategy should contain no errors.
    strategy = CheckStrategy()
    server.check_archive(strategy)
    assert strategy.has_error is False
    assert len(strategy.check_result) == 0
def test_check_archiver_errors(self, isdir_mock, listdir_mock):
    server = build_real_server()
    check_strategy = MagicMock()
    # There is no error file
    check_strategy.reset_mock()
    listdir_mock.return_value = []
    server.check_archiver_errors(check_strategy)
    check_strategy.result.assert_called_with(
        "main", "archiver errors", True, None)
    # There is one duplicate file
    check_strategy.reset_mock()
    listdir_mock.return_value = ["testing.duplicate"]
    server.check_archiver_errors(check_strategy)
    check_strategy.result.assert_called_with(
        "main", "archiver errors", False, "duplicates: 1")
    # There is one unknown file
    check_strategy.reset_mock()
    listdir_mock.return_value = ["testing.unknown"]
    server.check_archiver_errors(check_strategy)
    check_strategy.result.assert_called_with(
        "main", "archiver errors", False, "unknown: 1")
    # There is one not relevant file
    check_strategy.reset_mock()
    listdir_mock.return_value = ["testing.error"]
    server.check_archiver_errors(check_strategy)
    check_strategy.result.assert_called_with(
        "main", "archiver errors", False, "not relevant: 1")
    # There is one extraneous file
    check_strategy.reset_mock()
    listdir_mock.return_value = ["testing.wrongextension"]
    server.check_archiver_errors(check_strategy)
    check_strategy.result.assert_called_with(
        "main", "archiver errors", False, "unknown failure: 1")
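# The expectations in the test above imply that error files are bucketed
# by their extension. A minimal, hypothetical sketch of that bucketing
# (the function name and mapping are illustrative, not barman's actual
# implementation):
def classify_archiver_error(filename):
    """Map an errors-directory file name to the category reported."""
    suffix = filename.rsplit('.', 1)[-1]
    return {
        'duplicate': 'duplicates',
        'unknown': 'unknown',
        'error': 'not relevant',
    }.get(suffix, 'unknown failure')


# Matches the four cases asserted above:
assert classify_archiver_error('testing.duplicate') == 'duplicates'
assert classify_archiver_error('testing.wrongextension') == 'unknown failure'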
def test_delete_running_backup(
        self, delete_mock, get_first_backup_mock, tmpdir, capsys):
    """
    Simple test for the deletion of a running backup.
    We want to test the behaviour of the server.delete_backup method
    when invoked on a running backup
    """
    # Test the removal of a running backup. status STARTED
    server = build_real_server({"barman_home": tmpdir.strpath})
    backup_info_started = build_test_backup_info(
        status=BackupInfo.STARTED, server_name=server.config.name)
    get_first_backup_mock.return_value = backup_info_started.backup_id
    with ServerBackupLock(tmpdir.strpath, server.config.name):
        server.delete_backup(backup_info_started)
        out, err = capsys.readouterr()
        assert "Cannot delete a running backup (%s %s)" % (
            server.config.name, backup_info_started.backup_id) in err
    # Test the removal of a running backup. status EMPTY
    backup_info_empty = build_test_backup_info(
        status=BackupInfo.EMPTY, server_name=server.config.name)
    get_first_backup_mock.return_value = backup_info_empty.backup_id
    with ServerBackupLock(tmpdir.strpath, server.config.name):
        server.delete_backup(backup_info_empty)
        out, err = capsys.readouterr()
        assert "Cannot delete a running backup (%s %s)" % (
            server.config.name, backup_info_empty.backup_id) in err
    # Test the removal of a running backup. status DONE
    backup_info_done = build_test_backup_info(
        status=BackupInfo.DONE, server_name=server.config.name)
    with ServerBackupLock(tmpdir.strpath, server.config.name):
        server.delete_backup(backup_info_done)
        delete_mock.assert_called_with(backup_info_done)
    # Test the removal of a backup not running. status STARTED
    server.delete_backup(backup_info_started)
    delete_mock.assert_called_with(backup_info_started)
def test_current_xlog_file_name(self, is_in_recovery_mock, conn_mock):
    """
    simple test for current_xlog property
    """
    # Build a server
    server = build_real_server()
    cursor_mock = conn_mock.return_value.cursor.return_value
    timestamp = datetime.datetime(2016, 3, 30, 17, 4, 20, 271376)
    cursor_mock.fetchone.return_value = dict(
        location='0/35000528',
        file_name='000000010000000000000035',
        file_offset=1320,
        timestamp=timestamp,
    )
    # Special way to mock a property
    is_in_recovery_mock.return_value = False
    assert server.postgres.current_xlog_file_name == (
        '000000010000000000000035')
    # Reset mock
    conn_mock.reset_mock()
    # Test error management
    cursor_mock.execute.side_effect = PostgresConnectionError
    assert server.postgres.current_xlog_file_name is None
    cursor_mock.execute.side_effect = psycopg2.ProgrammingError
    assert server.postgres.current_xlog_file_name is None
def test_kill(self, pm_mock, capsys):
    server = build_real_server()
    # Empty process list, the process is not running
    task_name = 'test_task'
    process_list = []
    pm_mock.return_value.list.return_value = process_list
    pm_mock.return_value.kill.return_value = True
    server.kill(task_name)
    out, err = capsys.readouterr()
    assert ('Termination of %s failed: no such process for server %s' % (
        task_name, server.config.name)) in err
    # Successful kill
    pid = 1234
    process_list.append(ProcessInfo(pid, server.config.name, task_name))
    pm_mock.return_value.list.return_value = process_list
    pm_mock.return_value.kill.return_value = True
    server.kill('test_task')
    out, err = capsys.readouterr()
    assert ('Stopped process %s(%s)' % (task_name, pid)) in out
    # The process doesn't terminate
    pm_mock.return_value.kill.return_value = False
    server.kill('test_task')
    out, err = capsys.readouterr()
    assert ('ERROR: Cannot terminate process %s(%s)' % (
        task_name, pid)) in err
def test_timeline_has_children(self, tmpdir):
    """
    Test for the timeline_has_children
    """
    server = build_real_server({'barman_home': tmpdir.strpath})
    tmpdir.join('main/wals').ensure(dir=True)
    # Write three history files
    history_2 = server.get_wal_full_path('00000002.history')
    with open(history_2, "w") as fp:
        fp.write('1\t2/83000168\tat restore point "myrp"\n')
    history_3 = server.get_wal_full_path('00000003.history')
    with open(history_3, "w") as fp:
        fp.write('1\t2/83000168\tat restore point "myrp"\n')
    history_4 = server.get_wal_full_path('00000004.history')
    with open(history_4, "w") as fp:
        fp.write('1\t2/83000168\tat restore point "myrp"\n')
        fp.write('2\t2/84000268\tunknown\n')
    # Check that timeline 1 has three children, timeline 2 has one child
    # (timeline 4 forked off it), and timelines 3 and 4 have none.
    # A sketch of the underlying history-file parsing follows this test.
    assert len(server.get_children_timelines(1)) == 3
    assert len(server.get_children_timelines(2)) == 1
    assert len(server.get_children_timelines(3)) == 0
    assert len(server.get_children_timelines(4)) == 0
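# The test above relies on the format of PostgreSQL timeline history
# files: "<tli>.history" lists, one per line, the ancestor timelines the
# timeline forked from ("parent_tli<TAB>switchpoint<TAB>reason"). A
# minimal, hypothetical sketch of the lookup performed by
# Server.get_children_timelines (the helper name below is illustrative
# only, not barman's actual code):
def find_children_timelines(wals_dir, parent_tli):
    """Return the timelines whose history file names parent_tli."""
    import os
    import re
    children = []
    for name in sorted(os.listdir(wals_dir)):
        match = re.match(r'^([0-9A-F]{8})\.history$', name)
        if not match:
            continue
        tli = int(match.group(1), 16)
        with open(os.path.join(wals_dir, name)) as fp:
            # Each non-empty line starts with an ancestor timeline id
            ancestors = [int(line.split('\t')[0])
                         for line in fp if line.strip()]
        if parent_tli in ancestors:
            children.append(tli)
    return children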
def test_drop_repslot(self, capsys):
    """
    Test the 'drop_repslot' method of the Server class
    """
    # No operation if there is no streaming connection
    server = build_real_server()
    server.streaming = None
    assert server.drop_repslot() is None
    # No operation if the slot name is empty
    server.streaming = MagicMock()
    server.config.slot_name = None
    server.streaming.server_version = 90400
    assert server.drop_repslot() is None
    # If there is a streaming connection and the replication
    # slot is defined, then the replication slot should be
    # dropped
    server.config.slot_name = 'test_repslot'
    server.streaming.server_version = 90400
    server.drop_repslot()
    drop_repslot = server.streaming.drop_repslot
    drop_repslot.assert_called_with('test_repslot')
    # If the replication slot doesn't exist,
    # check that the underlying exception is correctly managed
    drop_repslot.side_effect = PostgresInvalidReplicationSlot
    server.drop_repslot()
    drop_repslot.assert_called_with('test_repslot')
    out, err = capsys.readouterr()
    assert "Replication slot 'test_repslot' does not exist" in err
def test_backup(self, backup_lock_mock, archive_wal_mock,
                backup_manager_mock, dir_mock, check_mock, capsys):
    """
    :param backup_lock_mock: mock ServerBackupLock
    :param archive_wal_mock: mock archive_wal server method
    :param backup_manager_mock: mock BackupManager.backup
    :param dir_mock: mock _make_directories
    :param check_mock: mock check
    """
    # Create server
    server = build_real_server()
    # A failure in the directory creation must be reported
    dir_mock.side_effect = OSError()
    server.backup()
    out, err = capsys.readouterr()
    assert 'failed to create' in err
    # A normal run invokes BackupManager.backup and archives WALs
    dir_mock.side_effect = None
    server.backup()
    backup_manager_mock.assert_called_once_with()
    archive_wal_mock.assert_called_once_with(verbose=False)
    # A busy lock file means another backup is already running
    backup_manager_mock.side_effect = LockFileBusy()
    server.backup()
    out, err = capsys.readouterr()
    assert 'Another backup process is running' in err
    # A lock file permission error is reported as such
    backup_manager_mock.side_effect = LockFilePermissionDenied()
    server.backup()
    out, err = capsys.readouterr()
    assert 'Permission denied, unable to access' in err
def test_xlogdb_with_exception(self, os_mock, tmpdir):
    """
    Testing the execution of xlog-db operations with an Exception

    :param os_mock: mock for os module
    :param tmpdir: temporary directory unique to the test invocation
    """
    # unpatch os.path
    os_mock.path = os.path
    # Setup temp dir and server
    server = build_real_server(
        global_conf={
            "barman_lock_directory": tmpdir.mkdir('lock').strpath
        },
        main_conf={
            "wals_directory": tmpdir.mkdir('wals').strpath
        })
    # Test the execution of the fsync on xlogdb file forcing an exception
    with pytest.raises(ExceptionTest):
        with server.xlogdb('w') as fxlogdb:
            fxlogdb.write("00000000000000000000")
            raise ExceptionTest()
    # Check the call on the fsync method. If the call has been issued,
    # the "exit" section of the context manager has been executed
    assert os_mock.fsync.called
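# The assertion above depends on server.xlogdb flushing and fsyncing the
# file in the context manager's exit path, so the write reaches disk even
# when the body raises. A minimal, hypothetical sketch of that pattern
# (illustrative only, not barman's actual implementation):
import contextlib


@contextlib.contextmanager
def synced_file(path, mode='w'):
    """Open a file and guarantee flush+fsync on exit, even on error."""
    fileobj = open(path, mode)
    try:
        yield fileobj
    finally:
        # Runs whether or not the body raised, mirroring what the
        # test asserts about server.xlogdb
        fileobj.flush()
        os.fsync(fileobj.fileno())
        fileobj.close()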
def test_recover_basebackup_copy(self, rsync_pg_mock, tmpdir):
    """
    Test the copy of the content of a backup during a recovery

    :param rsync_pg_mock: Mock rsync object for the purpose of this test
    """
    # Build basic folder/files structure
    dest = tmpdir.mkdir('destination')
    server = testing_helpers.build_real_server()
    backup_info = testing_helpers.build_test_backup_info(
        server=server,
        tablespaces=[('tbs1', 16387, '/fake/location')])
    # Build an executor
    executor = RecoveryExecutor(server.backup_manager)
    executor.config.tablespace_bandwidth_limit = {'tbs1': ''}
    executor.config.bandwidth_limit = 10
    executor.basebackup_copy(
        backup_info, dest.strpath, tablespaces=None)
    rsync_pg_mock.assert_called_with(
        network_compression=False, bwlimit=10, ssh=None, path=None,
        exclude_and_protect=['/pg_tblspc/16387'])
    rsync_pg_mock.assert_any_call(
        network_compression=False, bwlimit='', ssh=None, path=None,
        check=True)
    rsync_pg_mock.return_value.smart_copy.assert_any_call(
        '/some/barman/home/main/base/1234567890/16387/',
        '/fake/location', None)
    rsync_pg_mock.return_value.smart_copy.assert_called_with(
        '/some/barman/home/main/base/1234567890/data/',
        dest.strpath, None)
def test_prepare_tablespaces(self, tmpdir):
    """
    Test tablespaces preparation for recovery
    """
    # Prepare basic directory/files structure
    dest = tmpdir.mkdir('destination')
    wals = tmpdir.mkdir('wals')
    backup_info = testing_helpers.build_test_backup_info(
        tablespaces=[('tbs1', 16387, '/fake/location')])
    # Build an executor
    server = testing_helpers.build_real_server(
        main_conf={'wals_directory': wals.strpath})
    executor = RecoveryExecutor(server.backup_manager)
    # Use a mock as cmd obj
    cmd_mock = Mock()
    executor.prepare_tablespaces(backup_info, cmd_mock, dest.strpath, {})
    cmd_mock.create_dir_if_not_exists.assert_any_call(
        dest.join('pg_tblspc').strpath)
    cmd_mock.create_dir_if_not_exists.assert_any_call(
        '/fake/location')
    cmd_mock.delete_if_exists.assert_called_once_with(
        dest.join('pg_tblspc').join('16387').strpath)
    cmd_mock.create_symbolic_link.assert_called_once_with(
        '/fake/location',
        dest.join('pg_tblspc').join('16387').strpath)
def test_is_in_recovery(self, conn_mock):
    """
    simple test for is_in_recovery property
    """
    # Build a server
    server = build_real_server()
    cursor_mock = conn_mock.return_value.cursor.return_value
    # Too old
    conn_mock.return_value.server_version = 80400
    assert not server.postgres.is_in_recovery
    # In recovery
    conn_mock.return_value.server_version = 90100
    cursor_mock.fetchone.return_value = [True]
    assert server.postgres.is_in_recovery
    cursor_mock.execute.assert_called_once_with(
        "SELECT pg_is_in_recovery()")
    # Not in recovery
    cursor_mock.fetchone.return_value = [False]
    assert not server.postgres.is_in_recovery
    # Reset mock
    conn_mock.reset_mock()
    # Test error management
    cursor_mock.execute.side_effect = PostgresConnectionError
    assert server.postgres.is_in_recovery is None
    cursor_mock.execute.side_effect = psycopg2.ProgrammingError
    assert server.postgres.is_in_recovery is None
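# is_in_recovery, like the other connection properties tested in this
# module, reports None when the query cannot be run instead of raising.
# A minimal, hypothetical sketch of that error-swallowing property
# pattern (illustrative only; it reuses the module's psycopg2 and
# PostgresConnectionError imports and is not barman's actual code):
class RecoveryProbe(object):
    """Sketch: a property that degrades to None on connection errors."""

    def __init__(self, connect):
        self._connect = connect  # callable returning a DB-API connection

    @property
    def is_in_recovery(self):
        try:
            cursor = self._connect().cursor()
            cursor.execute("SELECT pg_is_in_recovery()")
            return cursor.fetchone()[0]
        except (PostgresConnectionError, psycopg2.Error):
            # Report "unknown" rather than raising, as the test asserts
            return None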
def test_streaming_server_txt_version(self, conn_mock):
    """
    simple test for the server_txt_version property
    """
    # Build a server
    server = build_real_server(
        main_conf={
            "streaming_archiver": True,
            "streaming_conninfo": "dummy=param"})
    # Connection error
    conn_mock.side_effect = PostgresConnectionError
    assert server.streaming.server_txt_version is None
    # Good connection
    conn_mock.side_effect = None
    conn_mock.return_value.server_version = 80300
    assert server.streaming.server_txt_version == "8.3.0"
    conn_mock.return_value.server_version = 90000
    assert server.streaming.server_txt_version == "9.0.0"
    conn_mock.return_value.server_version = 90005
    assert server.streaming.server_txt_version == "9.0.5"
    conn_mock.return_value.server_version = 100201
    assert server.streaming.server_txt_version == "10.2.1"
    conn_mock.return_value.server_version = 101811
    assert server.streaming.server_txt_version == "10.18.11"
    conn_mock.return_value.server_version = 0
    assert server.streaming.server_txt_version == "0.0.0"
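# The expected strings above appear to follow the classic three-part
# decoding of PostgreSQL's integer server_version
# (major*10000 + minor*100 + patch), applied uniformly even to
# 10.x-style numbers. A hypothetical sketch of that conversion
# (illustrative only, not barman's actual code):
def version_int_to_txt(server_version):
    """Decode e.g. 90005 -> '9.0.5' and 101811 -> '10.18.11'."""
    major = server_version // 10000
    minor = server_version // 100 % 100
    patch = server_version % 100
    return "%d.%d.%d" % (major, minor, patch)


assert version_int_to_txt(90005) == "9.0.5"
assert version_int_to_txt(101811) == "10.18.11"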
def test_get_archiver_stats(self, conn):
    """
    Simple test for the get_archiver_stats method
    """
    # Build a server
    server = build_real_server()
    cursor_mock = conn.return_value.cursor.return_value
    # Expect None as result for server version < 9.4
    conn.return_value.server_version = 80300
    assert server.postgres.get_archiver_stats() is None
    # Expect no errors with version >= 9.4
    conn.reset_mock()
    conn.return_value.server_version = 90400
    cursor_mock.fetchone.return_value = {"a": "b"}
    assert server.postgres.get_archiver_stats() == {"a": "b"}
    # Check for the correct call on the execute method
    cursor_mock.execute.assert_called_once_with(
        "SELECT *, current_setting('archive_mode')::BOOLEAN "
        "AND (last_failed_wal IS NULL "
        "OR last_failed_wal LIKE '%.history' "
        "AND substring(last_failed_wal from 1 for 8) "
        "<= substring(last_archived_wal from 1 for 8) "
        "OR last_failed_wal <= last_archived_wal) "
        "AS is_archiving, "
        "CAST (archived_count AS NUMERIC) "
        "/ EXTRACT (EPOCH FROM age(now(), stats_reset)) "
        "AS current_archived_wals_per_second "
        "FROM pg_stat_archiver"
    )
    conn.reset_mock()
    # Test error management
    cursor_mock.execute.side_effect = psycopg2.Error
    assert server.postgres.get_archiver_stats() is None
def test_has_pgespresso(self, conn_mock):
    """
    simple test for has_pgespresso property
    """
    # Build a server
    server = build_real_server()
    cursor_mock = conn_mock.return_value.cursor.return_value
    # Too old
    conn_mock.return_value.server_version = 90000
    assert not server.postgres.has_pgespresso
    # Extension present
    conn_mock.return_value.server_version = 90100
    cursor_mock.fetchone.return_value = [1]
    assert server.postgres.has_pgespresso
    cursor_mock.execute.assert_called_once_with(
        "SELECT count(*) FROM pg_extension "
        "WHERE extname = 'pgespresso'")
    # Extension not present
    cursor_mock.fetchone.return_value = [0]
    assert not server.postgres.has_pgespresso
    # Reset mock
    conn_mock.reset_mock()
    # Test error management
    cursor_mock.execute.side_effect = PostgresConnectionError
    assert server.postgres.has_pgespresso is None
    cursor_mock.execute.side_effect = psycopg2.ProgrammingError
    assert server.postgres.has_pgespresso is None
def test_pgespresso_start_backup(self, conn):
    """
    Simple test for the pgespresso_start_backup method
    """
    # Build and configure a server
    server = build_real_server()
    backup_label = "test label"
    # Expect no errors
    assert server.postgres.pgespresso_start_backup(backup_label)
    cursor_mock = conn.return_value.cursor.return_value
    cursor_mock.execute.assert_called_once_with(
        "SELECT pgespresso_start_backup(%s,%s), now()",
        (backup_label, server.postgres.config.immediate_checkpoint)
    )
    # Reset the mock for the second test
    conn.reset_mock()
    # Test 2: setup the mock to trigger an exception and
    # check that the method propagates it
    cursor_mock.execute.side_effect = psycopg2.Error
    with pytest.raises(Exception):
        server.postgres.pgespresso_start_backup("test_label")
def test_get_wal_info(self, get_wal_mock, tmpdir):
    """
    Basic test for the get_wal_info method

    Test the wals_per_second and total time in seconds values.
    """
    # Build a test server with a test path
    server = build_real_server(global_conf={
        'barman_home': tmpdir.strpath
    })
    # Mock method get_wal_until_next_backup for returning a list of
    # 3 fake WALs. The first one is the start and stop WAL of the backup
    wal_list = [
        WalFileInfo.from_xlogdb_line(
            "000000010000000000000002\t16777216\t1434450086.53\tNone\n"),
        WalFileInfo.from_xlogdb_line(
            "000000010000000000000003\t16777216\t1434450087.54\tNone\n"),
        WalFileInfo.from_xlogdb_line(
            "000000010000000000000004\t16777216\t1434450088.55\tNone\n")]
    get_wal_mock.return_value = wal_list
    backup_info = build_test_backup_info(
        server=server,
        begin_wal=wal_list[0].name,
        end_wal=wal_list[0].name)
    backup_info.save()
    # Evaluate total time in seconds:
    # last_wal_timestamp - first_wal_timestamp
    wal_total_seconds = wal_list[-1].time - wal_list[0].time
    # Evaluate the wals_per_second value:
    # (wals_in_backup + wals_until_next_backup) / total_time_in_seconds
    wals_per_second = len(wal_list) / wal_total_seconds
    wal_info = server.get_wal_info(backup_info)
    assert wal_info
    assert wal_info['wal_total_seconds'] == wal_total_seconds
    assert wal_info['wals_per_second'] == wals_per_second
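# Worked example for the arithmetic above, using the three fake WAL
# timestamps taken from the xlogdb lines (approximate floating-point
# values):
#
#   wal_total_seconds = 1434450088.55 - 1434450086.53 = 2.02
#   wals_per_second   = 3 / 2.02 ~= 1.485
#
# so a backup spanning three 16 MiB WAL files over about two seconds
# reports roughly 1.5 WALs archived per second.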
def test_server_txt_version(self, conn_mock):
    """
    simple test for the server_txt_version property
    """
    # Build a server
    server = build_real_server()
    cursor_mock = conn_mock.return_value.cursor.return_value
    # Connection error
    conn_mock.side_effect = PostgresConnectionError
    assert server.postgres.server_txt_version is None
    # Communication error
    conn_mock.side_effect = None
    cursor_mock.execute.side_effect = psycopg2.ProgrammingError
    assert server.postgres.server_txt_version is None
    # Good connection
    cursor_mock.execute.side_effect = None
    cursor_mock.fetchone.return_value = (
        "PostgreSQL 9.4.5 on x86_64-apple-darwin15.0.0, compiled by "
        "Apple LLVM version 7.0.0 (clang-700.1.76), 64-bit",
    )
    assert server.postgres.server_txt_version == "9.4.5"
    cursor_mock.execute.assert_called_with("SELECT version()")
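# The property exercised above turns the human-readable banner returned
# by "SELECT version()" into a bare version number. A hypothetical sketch
# of that extraction, assuming the version is the second
# whitespace-separated token of the banner (illustrative only):
def extract_txt_version(version_banner):
    """'PostgreSQL 9.4.5 on x86_64...' -> '9.4.5'"""
    try:
        return version_banner.split()[1]
    except IndexError:
        return None


assert extract_txt_version(
    "PostgreSQL 9.4.5 on x86_64-apple-darwin15.0.0") == "9.4.5"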
def test_create_restore_point(self, is_in_recovery_mock, conn_mock):
    """
    Basic test for the create_restore_point method
    """
    # Simulate a master connection
    is_in_recovery_mock.return_value = False
    server = build_real_server()
    # Test 1: Postgres 9.0, expect None as result
    conn_mock.return_value.server_version = 90000
    restore_point = server.postgres.create_restore_point(
        "Test_20151026T092241")
    assert restore_point is None
    # Simulate a standby connection
    is_in_recovery_mock.return_value = True
    # Test 2: Postgres 9.1 in recovery (standby), expect None as result
    conn_mock.return_value.server_version = 90100
    restore_point = server.postgres.create_restore_point(
        "Test_20151026T092241")
    assert restore_point is None
    # Test 3: Postgres 9.1, check mock calls
    is_in_recovery_mock.return_value = False
    assert server.postgres.create_restore_point("Test_20151026T092241")
    cursor_mock = conn_mock.return_value.cursor.return_value
    cursor_mock.execute.assert_called_with(
        "SELECT pg_create_restore_point(%s)", ["Test_20151026T092241"])
    assert cursor_mock.fetchone.called
    # Test error management
    cursor_mock.execute.side_effect = psycopg2.Error
    assert server.postgres.create_restore_point(
        "Test_20151026T092241") is None
def test_recover_xlog(self, rsync_pg_mock, tmpdir):
    """
    Test the recovery of the xlogs of a backup

    :param rsync_pg_mock: Mock rsync object for the purpose of this test
    """
    # Build basic folders/files structure
    dest = tmpdir.mkdir('destination')
    wals = tmpdir.mkdir('wals')
    xlog_dir = wals.mkdir(xlog.hash_dir('000000000000000000000002'))
    xlog_file = xlog_dir.join('000000000000000000000002')
    xlog_file.write('dummy content')
    server = testing_helpers.build_real_server(
        main_conf={'wals_directory': wals.strpath})
    # Build executor
    executor = RecoveryExecutor(server.backup_manager)
    required_wals = (WalFileInfo.from_xlogdb_line(
        '000000000000000000000002\t42\t43\tNone\n'),)
    executor.xlog_copy(required_wals, dest.strpath, None)
    # Check for a correct invocation of rsync using local paths
    rsync_pg_mock.from_file_list.assert_called_once_with(
        ['000000000000000000000002'],
        xlog_dir.strpath,
        dest.strpath)
    # Reset mock calls
    rsync_pg_mock.reset_mock()
    required_wals = (WalFileInfo.from_xlogdb_line(
        '000000000000000000000002\t42\t43\tNone\n'),)
    executor.backup_manager.compression_manager = Mock()
    executor.xlog_copy(required_wals, dest.strpath, 'remote_command')
    # Check for the invocation of rsync on a remote call
    rsync_pg_mock.assert_called_once_with(
        network_compression=False,
        bwlimit=None,
        ssh='remote_command')
def test_cron_lock_acquisition(self, subprocess_mock, tmpdir,
                               capsys, caplog):
    """
    Basic test for cron process lock acquisition
    """
    server = build_real_server({'barman_home': tmpdir.strpath})
    # Basic cron lock acquisition
    with ServerCronLock(tmpdir.strpath, server.config.name):
        server.cron(wals=True, retention_policies=False)
        out, err = capsys.readouterr()
        assert ("Another cron process is already running on server %s. "
                "Skipping to the next server\n"
                % server.config.name) in out
    # Lock acquisition for archive-wal
    with ServerWalArchiveLock(tmpdir.strpath, server.config.name):
        server.cron(wals=True, retention_policies=False)
        out, err = capsys.readouterr()
        assert ("Another archive-wal process is already running "
                "on server %s. Skipping to the next server"
                % server.config.name) in out
    # Lock acquisition for receive-wal
    with ServerWalArchiveLock(tmpdir.strpath, server.config.name):
        with ServerWalReceiveLock(tmpdir.strpath, server.config.name):
            # Force the streaming_archiver to True for this test
            server.config.streaming_archiver = True
            server.cron(wals=True, retention_policies=False)
            assert ("Another STREAMING ARCHIVER process is running for "
                    "server %s" % server.config.name) in caplog.text
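# The Server*Lock classes used above behave as mutually exclusive,
# per-server file locks: holding one makes the corresponding cron task
# skip its work. A minimal, hypothetical sketch of such a lock built on
# fcntl (illustrative only; barman's real locks live in barman.lockfile):
import fcntl


class SimpleFileLock(object):
    """Exclusive, non-blocking lock on a file under lock_dir."""

    def __init__(self, lock_dir, name):
        self.path = os.path.join(lock_dir, '.%s.lock' % name)
        self.fileobj = None

    def __enter__(self):
        self.fileobj = open(self.path, 'w')
        # Raises IOError/BlockingIOError if another process holds it
        fcntl.flock(self.fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        fcntl.flock(self.fileobj, fcntl.LOCK_UN)
        self.fileobj.close()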
def test_get_tablespaces(self, conn):
    """
    Simple test for the get_tablespaces method
    """
    # Build a server
    server = build_real_server()
    cursor_mock = conn.return_value.cursor.return_value
    cursor_mock.fetchall.return_value = [("tbs1", "1234", "/tmp")]
    # Expect no errors
    conn.return_value.server_version = 90400
    tbs = server.postgres.get_tablespaces()
    # Check for the correct call on the execute method
    cursor_mock.execute.assert_called_once_with(
        "SELECT spcname, oid, "
        "pg_tablespace_location(oid) AS spclocation "
        "FROM pg_tablespace "
        "WHERE pg_tablespace_location(oid) != ''"
    )
    assert tbs == [("tbs1", "1234", "/tmp")]
    conn.reset_mock()
    # 8.3 test
    conn.return_value.server_version = 80300
    cursor_mock.fetchall.return_value = [("tbs2", "5234", "/tmp1")]
    tbs = server.postgres.get_tablespaces()
    # Check for the correct call on the execute method
    cursor_mock.execute.assert_called_once_with(
        "SELECT spcname, oid, spclocation "
        "FROM pg_tablespace WHERE spclocation != ''"
    )
    assert tbs == [("tbs2", "5234", "/tmp1")]
    conn.reset_mock()
    # Test error management
    cursor_mock.execute.side_effect = psycopg2.Error
    assert server.postgres.get_tablespaces() is None
def test_switch_xlog(self, capsys):
    server = build_real_server()
    server.postgres = MagicMock()
    # Without force, a successful switch does not trigger a checkpoint
    server.postgres.switch_xlog.return_value = '000000010000000000000001'
    server.switch_xlog(force=False)
    out, err = capsys.readouterr()
    assert "Switch to 000000010000000000000001 for server 'main'" in out
    assert server.postgres.checkpoint.called is False
    # With force, a checkpoint is issued as well
    server.postgres.reset_mock()
    server.postgres.switch_xlog.return_value = '000000010000000000000001'
    server.switch_xlog(force=True)
    out, err = capsys.readouterr()
    assert "Switch to 000000010000000000000001 for server 'main'" in out
    assert server.postgres.checkpoint.called is True
    # An empty result means no switch was required
    server.postgres.reset_mock()
    server.postgres.switch_xlog.return_value = ''
    server.switch_xlog(force=False)
    out, err = capsys.readouterr()
    assert "No switch required for server 'main'" in out
    assert server.postgres.checkpoint.called is False
def _run_analyze_directory(self, list_files_mock, tmpdir,
                           ref_list, src_list):
    # Apply it to _list_files calls
    list_files_mock.side_effect = [ref_list, src_list]
    # Build the prerequisites
    server = build_real_server(
        global_conf={"barman_home": tmpdir.mkdir("home").strpath})
    config = server.config
    executor = server.backup_manager.executor
    # Create the RsyncCopyController putting the safe_horizon between
    # the tmp/safe and tmp2/check timestamps
    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=datetime(
            year=2015,
            month=2,
            day=20,
            hour=19,
            minute=0,
            second=0,
            tzinfo=dateutil.tz.tzlocal(),
        ),
    )
    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28,
    )
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)
    # Add a temp dir (usually created by the copy method)
    rcc.temp_dir = tmpdir.mkdir("tmp").strpath
    # Create an item to inspect
    item = _RsyncCopyItem(
        label="pgdata",
        src=":/pg/data/",
        dst=backup_info.get_data_directory(),
        is_directory=True,
        item_class=rcc.PGDATA_CLASS,
        optional=False,
    )
    # Then run the _analyze_directory method
    rcc._analyze_directory(item)
    return item, backup_info
def test_replication_status(self, capsys):
    """
    Test management of the replication status output

    :param capsys: retrieve output from console
    """
    # Build a fake get_replication_stats record
    replication_stats_data = dict(
        pid=93275,
        usesysid=10,
        usename='postgres',
        application_name='replica',
        client_addr=None,
        client_hostname=None,
        client_port=-1,
        backend_start=datetime.datetime(
            2016, 5, 6, 9, 29, 20, 98534,
            tzinfo=FixedOffsetTimezone(offset=120)),
        backend_xmin='940',
        state='streaming',
        sent_location='0/3005FF0',
        write_location='0/3005FF0',
        flush_location='0/3005FF0',
        replay_location='0/3005FF0',
        sync_priority=0,
        sync_state='async',
        sent_diff=Decimal('0'),
        write_diff=Decimal('0'),
        flush_diff=Decimal('0'),
        replay_diff=Decimal('0'))
    replication_stats_class = namedtuple(
        "Record", replication_stats_data.keys())
    replication_stats_record = replication_stats_class(
        **replication_stats_data)
    # Prepare the server
    server = build_real_server()
    server.postgres = MagicMock()
    server.postgres.get_replication_stats.return_value = [
        replication_stats_record
    ]
    server.postgres.current_xlog_location = "AB/CDEF1234"
    # Execute the test (ALL)
    server.postgres.reset_mock()
    server.replication_status('all')
    (out, err) = capsys.readouterr()
    assert err == ''
    server.postgres.get_replication_stats.assert_called_once_with(
        PostgreSQLConnection.ANY_STREAMING_CLIENT)
    # Execute the test (WALSTREAMER)
    server.postgres.reset_mock()
    server.replication_status('wal-streamer')
    (out, err) = capsys.readouterr()
    assert err == ''
    server.postgres.get_replication_stats.assert_called_once_with(
        PostgreSQLConnection.WALSTREAMER)
    # Execute the test (failure: PostgreSQL too old)
    server.postgres.reset_mock()
    server.postgres.get_replication_stats.side_effect = \
        PostgresUnsupportedFeature('9.1')
    server.replication_status('all')
    (out, err) = capsys.readouterr()
    assert 'Requires PostgreSQL 9.1 or higher' in out
    assert err == ''
    server.postgres.get_replication_stats.assert_called_once_with(
        PostgreSQLConnection.ANY_STREAMING_CLIENT)
    # Execute the test (failure: superuser required)
    server.postgres.reset_mock()
    server.postgres.get_replication_stats.side_effect = \
        PostgresSuperuserRequired
    server.replication_status('all')
    (out, err) = capsys.readouterr()
    assert 'Requires superuser rights' in out
    assert err == ''
    server.postgres.get_replication_stats.assert_called_once_with(
        PostgreSQLConnection.ANY_STREAMING_CLIENT)
def test_sync_backup(self, logger_mock, rsync_mock, tmpdir, capsys):
    """
    Test the synchronisation method, testing all
    the possible error conditions.

    :param MagicMock logger_mock: MagicMock obj mimicking the logger
    :param MagicMock rsync_mock: MagicMock replacing the Rsync class
    :param py.path.local tmpdir: py.test temporary directory
    :param capsys: fixture that allows access to stdout/stderr output
    """
    backup_name = "1234567890"
    server_name = "main"
    # Prepare paths
    backup_dir = tmpdir.mkdir(server_name)
    basebackup_dir = backup_dir.mkdir("base")
    full_backup_path = basebackup_dir.mkdir(backup_name)
    self._create_primary_info_file(tmpdir, backup_dir)
    # Test 1: not a passive node.
    # Expect SyncError
    server = build_real_server(
        global_conf={"barman_lock_directory": tmpdir.strpath},
        main_conf={"backup_directory": backup_dir.strpath},
    )
    with pytest.raises(SyncError):
        server.sync_backup(backup_name)
    # Test 2: normal sync execution, no error expected.
    # Test for all the steps on the logger
    logger_mock.reset_mock()
    server = build_real_server(
        global_conf={"barman_lock_directory": tmpdir.strpath},
        main_conf={
            "backup_directory": backup_dir.strpath,
            "primary_ssh_command": "ssh fakeuser@fakehost",
        },
    )
    server.sync_backup(backup_name)
    logger_mock.info.assert_any_call(
        "Synchronising with server %s backup %s: step 1/3: "
        "parse server information",
        server_name,
        backup_name,
    )
    logger_mock.info.assert_any_call(
        "Synchronising with server %s backup %s: step 2/3: file copy",
        server_name,
        backup_name,
    )
    logger_mock.info.assert_any_call(
        "Synchronising with server %s backup %s: step 3/3: finalise sync",
        server_name,
        backup_name,
    )
    # Test 3: test Rsync failure.
    # Expect a BackupInfo object with status "FAILED"
    # and an error message in the "error" field of the obj
    rsync_mock.reset_mock()
    server.backup_manager._backup_cache = {}
    rsync_mock.side_effect = CommandFailedException("TestFailure")
    full_backup_path.remove(rec=1)
    server.sync_backup(backup_name)
    backup_info = server.get_backup(backup_name)
    assert backup_info.status == BackupInfo.FAILED
    assert (
        backup_info.error == "failure syncing server main "
        "backup 1234567890: TestFailure"
    )
    # Test 4: test KeyboardInterrupt management.
    # Check the error message for the KeyboardInterrupt event
    rsync_mock.reset_mock()
    rsync_mock.side_effect = CommandFailedException("TestFailure")
    full_backup_path.remove(rec=1)
    rsync_mock.side_effect = KeyboardInterrupt()
    server.sync_backup(backup_name)
    backup_info = server.get_backup(backup_name)
    assert backup_info.status == BackupInfo.FAILED
    assert (
        backup_info.error == "failure syncing server main "
        "backup 1234567890: KeyboardInterrupt"
    )
    # Test 5: test backup name not present on the master server.
    # Expect an error message on stderr
    rsync_mock.reset_mock()
    rsync_mock.side_effect = CommandFailedException("TestFailure")
    full_backup_path.remove(rec=1)
    server.sync_backup("wrong_backup_name")
    (out, err) = capsys.readouterr()
    # Check the stderr using capsys. We need only the first line
    # from stderr
    e = err.split("\n")
    assert "ERROR: failure syncing server main backup 1234567890: TestFailure" in e
    # Test 6: backup already synced.
    # Check for the warning message on stdout using capsys
    rsync_mock.reset_mock()
    rsync_mock.side_effect = None
    # Do it the first time and check it succeeded
    server.sync_backup(backup_name)
    backup_info = server.get_backup(backup_name)
    assert backup_info.status == BackupInfo.DONE
    # Do it again and test it does not call rsync
    rsync_mock.reset_mock()
    server.sync_backup(backup_name)
    assert not rsync_mock.called
    (out, err) = capsys.readouterr()
    assert out.strip() == "Backup 1234567890 is already synced with main server"
def test_check_sync_required(self):
    """
    Test the behaviour of the check_sync_required method,
    testing all the possible error conditions.
    """
    backup_name = "test_backup_name"
    backups = {'backups': {"test_backup_name": {}}}
    server = build_real_server()
    # Test 1: pass, no exception
    server.check_sync_required(backup_name, backups, None)
    # Test 2: backup_name not in backups and no local backup. SyncError
    backup_name = "wrong_test_backup_name"
    with pytest.raises(SyncError):
        server.check_sync_required(backup_name, backups, None)
    # Test 3: backup_name not in backups, and incomplete local
    # copy. Remove partial sync and raise SyncToBeDeleted
    backup_name = "wrong_test_backup_name"
    local_backup_info_mock = build_test_backup_info(
        server=server, status=BackupInfo.FAILED)
    with pytest.raises(SyncToBeDeleted):
        server.check_sync_required(
            backup_name, backups, local_backup_info_mock)
    # Test 4: local only copy, nothing to do
    backup_name = "wrong_test_backup_name"
    local_backup_info_mock = build_test_backup_info(
        server=server, status=BackupInfo.DONE)
    with pytest.raises(SyncNothingToDo):
        server.check_sync_required(
            backup_name, backups, local_backup_info_mock)
    # Test 5: already synced backup. Nothing to do
    backup_name = "test_backup_name"
    local_backup_info_mock = build_test_backup_info(
        server=server, status=BackupInfo.DONE)
    with pytest.raises(SyncNothingToDo):
        server.check_sync_required(
            backup_name, backups, local_backup_info_mock)
    # Test 6: check backup with local retention policies.
    # Case one: redundancy retention 1.
    # Expect "nothing to do"
    backup_name = "test_backup6"
    # Build a new server with a configuration that uses retention
    # policies
    server = build_real_server(global_conf={
        "retention_policy": "redundancy 1",
        "wal_retention_policy": "main"
    })
    backups = {
        'backups': {
            "test_backup6": build_test_backup_info(
                server=server,
                backup_id='test_backup6').to_json()
        },
        'config': {
            'name': 'test_server'
        },
    }
    with mock.patch("barman.server.Server.get_available_backups") as bk:
        local_backup_info_mock = None
        bk.return_value = {
            "test_backup5": build_test_backup_info(
                server=server, backup_id='test_backup5'),
            "test_backup7": build_test_backup_info(
                server=server, backup_id='test_backup7'),
        }
        with pytest.raises(SyncNothingToDo):
            server.check_sync_required(
                backup_name, backups, local_backup_info_mock)
    # Test 7: check backup with local retention policies.
    # Case two: recovery window of 1 day.
    # Expect "nothing to do"
    backup_name = "test_backup6"
    # Build a new server with a configuration that uses retention
    # policies
    server = build_real_server(
        global_conf={
            "retention_policy": "RECOVERY WINDOW OF 1 day",
            "wal_retention_policy": "main"
        })
    backups = {
        'backups': {
            "test_backup6": build_test_backup_info(
                server=server,
                backup_id='test_backup6',
                begin_time=(datetime.now(tz.tzlocal()) +
                            timedelta(days=4)),
                end_time=(datetime.now(tz.tzlocal()) -
                          timedelta(days=3))).to_json()
        },
        'config': {
            'name': 'test_server'
        },
    }
    with mock.patch("barman.server.Server.get_available_backups") as bk:
        local_backup_info_mock = None
        bk.return_value = {
            "test_backup7": build_test_backup_info(
                server=server,
                backup_id='test_backup7',
                begin_time=(datetime.now(tz.tzlocal()) +
                            timedelta(days=4)),
                end_time=(datetime.now(tz.tzlocal()) -
                          timedelta(days=3)))
        }
        with pytest.raises(SyncNothingToDo):
            server.check_sync_required(
                backup_name, backups, local_backup_info_mock)
def test_recovery(
    self, remote_cmd_mock, rsync_pg_mock, copy_controller_mock, tmpdir
):
    """
    Test the execution of a recovery
    """
    # Prepare basic directory/files structure
    dest = tmpdir.mkdir("destination")
    base = tmpdir.mkdir("base")
    wals = tmpdir.mkdir("wals")
    backup_info = testing_helpers.build_test_backup_info(tablespaces=[])
    backup_info.config.basebackups_directory = base.strpath
    backup_info.config.wals_directory = wals.strpath
    backup_info.version = 90400
    datadir = base.mkdir(backup_info.backup_id).mkdir("data")
    backup_info.pgdata = datadir.strpath
    postgresql_conf_local = datadir.join("postgresql.conf")
    postgresql_auto_local = datadir.join("postgresql.auto.conf")
    postgresql_conf_local.write(
        "archive_command = something\n" "data_directory = something"
    )
    postgresql_auto_local.write(
        "archive_command = something\n" "data_directory = something"
    )
    shutil.copy2(postgresql_conf_local.strpath, dest.strpath)
    shutil.copy2(postgresql_auto_local.strpath, dest.strpath)
    # Avoid triggering warning for missing config files
    datadir.ensure("pg_hba.conf")
    datadir.ensure("pg_ident.conf")
    # Build an executor
    server = testing_helpers.build_real_server(
        global_conf={"barman_lock_directory": tmpdir.mkdir("lock").strpath},
        main_conf={"wals_directory": wals.strpath},
    )
    executor = RecoveryExecutor(server.backup_manager)
    # Test local recovery
    with closing(executor):
        rec_info = executor.recover(backup_info, dest.strpath, exclusive=True)
    # Remove not useful keys from the result
    del rec_info["cmd"]
    sys_tempdir = rec_info["tempdir"]
    assert rec_info == {
        "rsync": None,
        "tempdir": sys_tempdir,
        "wal_dest": dest.join("pg_xlog").strpath,
        "recovery_dest": "local",
        "destination_path": dest.strpath,
        "temporary_configuration_files": [
            dest.join("postgresql.conf").strpath,
            dest.join("postgresql.auto.conf").strpath,
        ],
        "results": {
            "delete_barman_wal": False,
            "recovery_start_time": rec_info["results"]["recovery_start_time"],
            "get_wal": False,
            "changes": [
                Assertion._make(
                    ["postgresql.conf", 0, "archive_command", "false"]
                ),
                Assertion._make(
                    ["postgresql.auto.conf", 0, "archive_command", "false"]
                ),
            ],
            "missing_files": [],
            "recovery_configuration_file": "recovery.conf",
            "warnings": [
                Assertion._make(
                    ["postgresql.conf", 2, "data_directory", "something"]
                ),
                Assertion._make(
                    ["postgresql.auto.conf", 2, "data_directory", "something"]
                ),
            ],
        },
        "target_epoch": None,
        "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
        "target_datetime": None,
        "safe_horizon": None,
        "is_pitr": False,
        "get_wal": False,
    }
    # Test remote recovery
    with closing(executor):
        rec_info = executor.recover(
            backup_info,
            dest.strpath,
            remote_command="remote@command",
            exclusive=True,
        )
    # Remove not useful keys from the result
    del rec_info["cmd"]
    del rec_info["rsync"]
    sys_tempdir = rec_info["tempdir"]
    assert rec_info == {
        "tempdir": sys_tempdir,
        "wal_dest": dest.join("pg_xlog").strpath,
        "recovery_dest": "remote",
        "destination_path": dest.strpath,
        "temporary_configuration_files": [
            os.path.join(sys_tempdir, "postgresql.conf"),
            os.path.join(sys_tempdir, "postgresql.auto.conf"),
        ],
        "results": {
            "delete_barman_wal": False,
            "get_wal": False,
            "recovery_start_time": rec_info["results"]["recovery_start_time"],
            "changes": [
                Assertion._make(
                    ["postgresql.conf", 0, "archive_command", "false"]
                ),
                Assertion._make(
                    ["postgresql.auto.conf", 0, "archive_command", "false"]
                ),
            ],
            "missing_files": [],
            "recovery_configuration_file": "recovery.conf",
            "warnings": [
                Assertion._make(
                    ["postgresql.conf", 2, "data_directory", "something"]
                ),
                Assertion._make(
                    ["postgresql.auto.conf", 2, "data_directory", "something"]
                ),
            ],
        },
        "target_epoch": None,
        "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
        "target_datetime": None,
        "safe_horizon": None,
        "is_pitr": False,
        "get_wal": False,
    }
    # Test failed rsync
    rsync_pg_mock.side_effect = CommandFailedException()
    with pytest.raises(CommandFailedException):
        with closing(executor):
            executor.recover(
                backup_info,
                dest.strpath,
                exclusive=True,
                remote_command="remote@command",
            )
def test_generate_recovery_conf(self, rsync_pg_mock, tmpdir):
    """
    Test the generation of recovery configuration

    :type tmpdir: py.path.local
    """
    # Build basic folder/files structure
    recovery_info = {
        "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
        "tempdir": tmpdir.strpath,
        "results": {"changes": [], "warnings": []},
        "get_wal": False,
    }
    backup_info = testing_helpers.build_test_backup_info(
        version=120000,
    )
    dest = tmpdir.mkdir("destination")
    # Build a recovery executor using a real server
    server = testing_helpers.build_real_server()
    executor = RecoveryExecutor(server.backup_manager)
    executor._generate_recovery_conf(
        recovery_info,
        backup_info,
        dest.strpath,
        True,
        True,
        "remote@command",
        "target_name",
        "2015-06-03 16:11:03.71038+02",
        "2",
        "",
        "",
        None,
    )
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # Check for contents
    assert "recovery_end_command" in pg_auto_conf
    assert "recovery_target_time" in pg_auto_conf
    assert "recovery_target_timeline" in pg_auto_conf
    assert "recovery_target_xid" not in pg_auto_conf
    assert "recovery_target_lsn" not in pg_auto_conf
    assert "recovery_target_name" in pg_auto_conf
    assert "recovery_target" in pg_auto_conf
    assert pg_auto_conf["recovery_end_command"] == "'rm -fr barman_wal'"
    assert pg_auto_conf["recovery_target_time"] == (
        "'2015-06-03 16:11:03.71038+02'"
    )
    assert pg_auto_conf["recovery_target_timeline"] == "2"
    assert pg_auto_conf["recovery_target_name"] == "'target_name'"
    # Test 'pause_at_recovery_target' recovery_info entry
    signal_file.remove()
    recovery_info["pause_at_recovery_target"] = "on"
    executor._generate_recovery_conf(
        recovery_info,
        backup_info,
        dest.strpath,
        True,
        True,
        "remote@command",
        "target_name",
        "2015-06-03 16:11:03.71038+02",
        "2",
        "",
        "",
        None,
    )
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # Finally check pause_at_recovery_target value
    assert pg_auto_conf["pause_at_recovery_target"] == "'on'"
    # Test 'recovery_target_action'
    signal_file.remove()
    del recovery_info["pause_at_recovery_target"]
    recovery_info["recovery_target_action"] = "pause"
    executor._generate_recovery_conf(
        recovery_info,
        backup_info,
        dest.strpath,
        True,
        True,
        "remote@command",
        "target_name",
        "2015-06-03 16:11:03.71038+02",
        "2",
        "",
        "",
        None,
    )
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # Finally check recovery_target_action value
    assert pg_auto_conf["recovery_target_action"] == "'pause'"
    # Test 'standby_mode'
    signal_file.remove()
    executor._generate_recovery_conf(
        recovery_info,
        backup_info,
        dest.strpath,
        True,
        True,
        "remote@command",
        "target_name",
        "2015-06-03 16:11:03.71038+02",
        "2",
        "",
        "",
        True,
    )
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file doesn't exist
    wrong_signal_file = tmpdir.join("recovery.signal")
    assert not wrong_signal_file.check()
    # Check that the standby.signal file exists
    signal_file = tmpdir.join("standby.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # standby_mode is not a valid configuration in PostgreSQL 12
    assert "standby_mode" not in pg_auto_conf
    signal_file.remove()
    executor._generate_recovery_conf(
        recovery_info,
        backup_info,
        dest.strpath,
        True,
        True,
        "remote@command",
        "target_name",
        "2015-06-03 16:11:03.71038+02",
        "2",
        "",
        "",
        False,
    )
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the standby.signal file doesn't exist
    wrong_signal_file = tmpdir.join("standby.signal")
    assert not wrong_signal_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # standby_mode is not a valid configuration in PostgreSQL 12
    assert "standby_mode" not in pg_auto_conf
    signal_file.remove()
    executor._generate_recovery_conf(
        recovery_info,
        backup_info,
        dest.strpath,
        True,
        True,
        "remote@command",
        "target_name",
        "2015-06-03 16:11:03.71038+02",
        "2",
        "",
        "",
        None,
    )
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the standby.signal file doesn't exist
    wrong_signal_file = tmpdir.join("standby.signal")
    assert not wrong_signal_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # standby_mode is not a valid configuration in PostgreSQL 12
    assert "standby_mode" not in pg_auto_conf
def test_generate_recovery_conf_pre12(self, rsync_pg_mock, tmpdir):
    """
    Test the generation of recovery.conf file
    """
    # Build basic folder/files structure
    recovery_info = {
        "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
        "tempdir": tmpdir.strpath,
        "results": {"changes": [], "warnings": []},
        "get_wal": False,
    }
    backup_info = testing_helpers.build_test_backup_info()
    dest = tmpdir.mkdir("destination")

    # Build a recovery executor using a real server
    server = testing_helpers.build_real_server()
    executor = RecoveryExecutor(server.backup_manager)
    executor._generate_recovery_conf(
        recovery_info, backup_info, dest.strpath,
        True, True, "remote@command", "target_name",
        "2015-06-03 16:11:03.71038+02", "2", "", "", None,
    )

    # Check that the recovery.conf file exists
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert recovery_conf_file.check()
    # Parse the generated recovery.conf
    recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
    # check for contents
    assert "recovery_end_command" in recovery_conf
    assert "recovery_target_time" in recovery_conf
    assert "recovery_target_timeline" in recovery_conf
    assert "recovery_target_xid" not in recovery_conf
    assert "recovery_target_lsn" not in recovery_conf
    assert "recovery_target_name" in recovery_conf
    assert "recovery_target" not in recovery_conf
    assert recovery_conf["recovery_end_command"] == "'rm -fr barman_wal'"
    assert recovery_conf["recovery_target_time"] == "'2015-06-03 16:11:03.71038+02'"
    assert recovery_conf["recovery_target_timeline"] == "2"
    assert recovery_conf["recovery_target_name"] == "'target_name'"

    # Test 'pause_at_recovery_target' recovery_info entry
    recovery_info["pause_at_recovery_target"] = "on"
    executor._generate_recovery_conf(
        recovery_info, backup_info, dest.strpath,
        True, True, "remote@command", "target_name",
        "2015-06-03 16:11:03.71038+02", "2", "", "", None,
    )
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert recovery_conf_file.check()
    recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
    assert recovery_conf["pause_at_recovery_target"] == "'on'"

    # Test 'recovery_target_action'
    del recovery_info["pause_at_recovery_target"]
    recovery_info["recovery_target_action"] = "pause"
    executor._generate_recovery_conf(
        recovery_info, backup_info, dest.strpath,
        True, True, "remote@command", "target_name",
        "2015-06-03 16:11:03.71038+02", "2", "", "", None,
    )
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert recovery_conf_file.check()
    recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
    assert recovery_conf["recovery_target_action"] == "'pause'"

    # Test 'standby_mode'
    executor._generate_recovery_conf(
        recovery_info, backup_info, dest.strpath,
        True, True, "remote@command", "target_name",
        "2015-06-03 16:11:03.71038+02", "2", "", "", True,
    )
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert recovery_conf_file.check()
    recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
    assert recovery_conf["standby_mode"] == "'on'"

    executor._generate_recovery_conf(
        recovery_info, backup_info, dest.strpath,
        True, True, "remote@command", "target_name",
        "2015-06-03 16:11:03.71038+02", "2", "", "", False,
    )
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert recovery_conf_file.check()
    recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
    assert "standby_mode" not in recovery_conf

    executor._generate_recovery_conf(
        recovery_info, backup_info, dest.strpath,
        True, True, "remote@command", "target_name",
        "2015-06-03 16:11:03.71038+02", "2", "", "", None,
    )
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert recovery_conf_file.check()
    recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
    assert "standby_mode" not in recovery_conf
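# The two tests above exercise the same _generate_recovery_conf entry point
# against PostgreSQL 12 and pre-12 backups. A hedged sketch of the version
# switch the assertions imply (names are illustrative, not Barman's actual
# code):
def recovery_config_targets(backup_version, standby_mode):
    """Return (config file, signal file) for a given PostgreSQL version."""
    if backup_version >= 120000:
        # PostgreSQL 12 dropped recovery.conf: options are appended to
        # postgresql.auto.conf, plus an empty signal file
        signal = "standby.signal" if standby_mode else "recovery.signal"
        return "postgresql.auto.conf", signal
    # Pre-12: every option, including standby_mode, lives in recovery.conf
    return "recovery.conf", None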
def test_pg_stat_archiver_output(self, remote_mock, capsys):
    """
    Test management of pg_stat_archiver view output

    :param MagicMock remote_mock: mock the get_remote_status function
    :param capsys: retrieve output from console
    """
    stats = {
        "failed_count": "2",
        "last_archived_wal": "000000010000000000000006",
        "last_archived_time": datetime.datetime.now(),
        "last_failed_wal": "000000010000000000000005",
        "last_failed_time": datetime.datetime.now(),
        "current_archived_wals_per_second": 1.0002,
    }
    remote_mock.return_value = dict(stats)

    server = build_real_server()
    server.server_version = 90400
    server.config.description = None
    server.config.KEYS = []
    server.config.last_backup_maximum_age = datetime.timedelta(days=1)
    # Mock the BackupExecutor.get_remote_status() method
    server.backup_manager.executor.get_remote_status = MagicMock(
        return_value={})

    # Testing the show-server command.
    # Expecting in the output the same values present in the stats dict
    server.show()
    (out, err) = capsys.readouterr()
    assert err == ''
    result = dict(
        item.strip('\t\n\r').split(": ")
        for item in out.split("\n")
        if item != '')
    assert result['failed_count'] == stats['failed_count']
    assert result['last_archived_wal'] == stats['last_archived_wal']
    assert result['last_archived_time'] == str(stats['last_archived_time'])
    assert result['last_failed_wal'] == stats['last_failed_wal']
    assert result['last_failed_time'] == str(stats['last_failed_time'])
    assert result['current_archived_wals_per_second'] == \
        str(stats['current_archived_wals_per_second'])

    # test output for status
    # Expecting:
    # Last archived WAL:
    #   <last_archived_wal>, at <last_archived_time>
    # Failures of WAL archiver:
    #   <failed_count> (<last_failed_wal>, at <last_failed_time>)
    remote_mock.return_value = defaultdict(lambda: None,
                                           server_txt_version=1,
                                           **stats)
    server.status()
    (out, err) = capsys.readouterr()
    # clean the output
    result = dict(
        item.strip('\t\n\r').split(": ")
        for item in out.split("\n")
        if item != '')
    assert err == ''
    # check the result
    assert result['Last archived WAL'] == '%s, at %s' % (
        stats['last_archived_wal'], stats['last_archived_time'].ctime())
    assert result['Failures of WAL archiver'] == '%s (%s at %s)' % (
        stats['failed_count'],
        stats['last_failed_wal'],
        stats['last_failed_time'].ctime())
def test_check_postgres(self, postgres_mock, capsys):
    """
    Test management of check_postgres view output

    :param postgres_mock: mock get_remote_status function
    :param capsys: retrieve output from console
    """
    postgres_mock.return_value = {'server_txt_version': None}
    # Create server
    server = build_real_server()
    # Case: no reply by PostgreSQL
    # Expect out: PostgreSQL: FAILED
    strategy = CheckOutputStrategy()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == '\tPostgreSQL: FAILED\n'

    # Case: correct configuration
    postgres_mock.return_value = {
        'current_xlog': None,
        'archive_command': 'wal to archive',
        'pgespresso_installed': None,
        'server_txt_version': 'PostgresSQL 9_4',
        'data_directory': '/usr/local/postgres',
        'archive_mode': 'on',
        'wal_level': 'archive'
    }
    # Expect out: all parameters: OK
    # Postgres version >= 9.0 - check wal_level
    server = build_real_server()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == "\tPostgreSQL: OK\n" \
                  "\twal_level: OK\n"

    # Postgres version < 9.0 - avoid wal_level check
    del postgres_mock.return_value['wal_level']
    server = build_real_server()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == "\tPostgreSQL: OK\n"

    # Case: wal_level and archive_command values are not acceptable
    postgres_mock.return_value = {
        'current_xlog': None,
        'archive_command': None,
        'pgespresso_installed': None,
        'server_txt_version': 'PostgresSQL 9_4',
        'data_directory': '/usr/local/postgres',
        'archive_mode': 'on',
        'wal_level': 'minimal'
    }
    # Expect out: some parameters: FAILED
    strategy = CheckOutputStrategy()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert out == "\tPostgreSQL: OK\n" \
                  "\twal_level: FAILED (please set it to a higher level " \
                  "than 'minimal')\n"
def test_copy(self, rsync_ignore_mock, tmpdir):
    """
    Unit test for RsyncCopyController._copy's code
    """
    # Build the prerequisites
    server = build_real_server(
        global_conf={'barman_home': tmpdir.mkdir('home').strpath})
    config = server.config
    executor = server.backup_manager.executor

    # Create the RsyncCopyController putting the safe_horizon between
    # the tmp/safe and tmp2/check timestamps
    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=datetime(year=2015, month=2, day=20,
                              hour=19, minute=0, second=0,
                              tzinfo=dateutil.tz.tzlocal()))

    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28)
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)

    # Create an rsync mock
    rsync_mock = mock.Mock(name='Rsync()')

    # Then run the _copy method
    rcc._copy(rsync_mock, ':/pg/data/', backup_info.get_data_directory(),
              '/path/to/file.list', checksum=True)

    # Verify that _rsync_ignore_vanished_files has been called correctly
    assert rsync_ignore_mock.mock_calls == [
        mock.call(rsync_mock, ':/pg/data/',
                  backup_info.get_data_directory(),
                  '--files-from=/path/to/file.list',
                  '--checksum', check=True),
    ]

    # Try again without checksum
    rsync_ignore_mock.reset_mock()
    rcc._copy(rsync_mock, ':/pg/data/', backup_info.get_data_directory(),
              '/path/to/file.list', checksum=False)

    # Verify that _rsync_ignore_vanished_files has been called correctly
    assert rsync_ignore_mock.mock_calls == [
        mock.call(rsync_mock, ':/pg/data/',
                  backup_info.get_data_directory(),
                  '--files-from=/path/to/file.list',
                  check=True),
    ]
def test_statistics(self, signal_mock, tempfile_mock, copy_mock,
                    create_and_purge_mock, analyse_mock, rsync_mock,
                    tmpdir, workers):
    """
    Unit test for RsyncCopyController.statistics's code
    """
    # Do a fake copy run to populate the start/stop timestamps.
    # The steps are the same as in the full run test
    tempdir = tmpdir.mkdir('tmp')
    tempfile_mock.return_value = tempdir.strpath
    server = build_real_server(
        global_conf={'barman_home': tmpdir.mkdir('home').strpath})
    config = server.config
    executor = server.backup_manager.executor

    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=None,
        workers=workers)

    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28)
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)

    # Silence the access to result properties
    rsync_mock.return_value.out = ''
    rsync_mock.return_value.err = ''
    rsync_mock.return_value.ret = 0

    # Mock analyze directory
    def analyse_func(item):
        label = item.label
        item.dir_file = label + '_dir_file'
        item.exclude_and_protect_file = label + '_exclude_and_protect_file'
        item.safe_list = [_FileItem('mode', 1, 'date', 'path')]
        item.check_list = [_FileItem('mode', 1, 'date', 'path')]
    analyse_mock.side_effect = analyse_func

    rcc.add_directory(
        label='tbs1',
        src=':/fake/location/',
        dst=backup_info.get_data_directory(16387),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS)
    rcc.add_directory(
        label='tbs2',
        src=':/another/location/',
        dst=backup_info.get_data_directory(16405),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS)
    rcc.add_directory(
        label='pgdata',
        src=':/pg/data/',
        dst=backup_info.get_data_directory(),
        reuse=None,
        bwlimit=None,
        item_class=rcc.PGDATA_CLASS,
        exclude=['/pg_xlog/*', '/pg_log/*',
                 '/recovery.conf', '/postmaster.pid'],
        exclude_and_protect=['pg_tblspc/16387', 'pg_tblspc/16405'])
    rcc.add_file(
        label='pg_control',
        src=':/pg/data/global/pg_control',
        dst='%s/global/pg_control' % backup_info.get_data_directory(),
        item_class=rcc.PGCONTROL_CLASS)
    rcc.add_file(
        label='config_file',
        src=':/etc/postgresql.conf',
        dst=backup_info.get_data_directory(),
        item_class=rcc.CONFIG_CLASS,
        optional=False)

    # Do the fake run
    rcc.copy()

    # Calculate statistics
    result = rcc.statistics()

    # We cannot check the actual result because it is not predictable,
    # so we check that every value is present, is a number and is
    # greater than 0
    assert result.get('analysis_time') > 0
    assert 'analysis_time_per_item' in result
    for tbs in ('pgdata', 'tbs1', 'tbs2'):
        assert result['analysis_time_per_item'][tbs] > 0

    assert result.get('copy_time') > 0
    assert 'copy_time_per_item' in result
    assert 'serialized_copy_time_per_item' in result
    for tbs in ('pgdata', 'tbs1', 'tbs2', 'config_file', 'pg_control'):
        assert result['copy_time_per_item'][tbs] > 0
        assert result['serialized_copy_time_per_item'][tbs] > 0

    assert result.get('number_of_workers') == rcc.workers
    assert result.get('total_time') > 0
def test_analyze_directory(self, list_files_mock, rsync_factory_mock,
                           tmpdir):
    """
    Unit test for RsyncCopyController._analyze_directory's code
    """
    # Build file list for ref
    ref_list = [
        _FileItem(
            'drwxrwxrwt', 69632,
            datetime(year=2015, month=2, day=9,
                     hour=15, minute=1, second=0,
                     tzinfo=dateutil.tz.tzlocal()),
            '.'),
        _FileItem(
            'drwxrwxrwt', 69612,
            datetime(year=2015, month=2, day=19,
                     hour=15, minute=1, second=22,
                     tzinfo=dateutil.tz.tzlocal()),
            'tmp'),
        _FileItem(
            '-rw-r--r--', 69632,
            datetime(year=2015, month=2, day=20,
                     hour=18, minute=15, second=33,
                     tzinfo=dateutil.tz.tzlocal()),
            'tmp/safe'),
        _FileItem(
            '-rw-r--r--', 69612,
            datetime(year=2015, month=2, day=20,
                     hour=19, minute=15, second=33,
                     tzinfo=dateutil.tz.tzlocal()),
            'tmp/check'),
        _FileItem(
            '-rw-r--r--', 69612,
            datetime(year=2015, month=2, day=20,
                     hour=19, minute=15, second=33,
                     tzinfo=dateutil.tz.tzlocal()),
            'tmp/diff_time'),
        _FileItem(
            '-rw-r--r--', 69612,
            datetime(year=2015, month=2, day=20,
                     hour=19, minute=15, second=33,
                     tzinfo=dateutil.tz.tzlocal()),
            'tmp/diff_size'),
    ]

    # Build the list for source adding a new file, ...
    src_list = ref_list + [
        _FileItem(
            '-rw-r--r--', 69612,
            datetime(year=2015, month=2, day=20,
                     hour=22, minute=15, second=33,
                     tzinfo=dateutil.tz.tzlocal()),
            'tmp/new'),
    ]
    # ... changing the timestamp of one old file ...
    src_list[4] = _FileItem(
        '-rw-r--r--', 69612,
        datetime(year=2015, month=2, day=20,
                 hour=20, minute=15, second=33,
                 tzinfo=dateutil.tz.tzlocal()),
        'tmp/diff_time')
    # ... and changing the size of another
    src_list[5] = _FileItem(
        '-rw-r--r--', 77777,
        datetime(year=2015, month=2, day=20,
                 hour=19, minute=15, second=33,
                 tzinfo=dateutil.tz.tzlocal()),
        'tmp/diff_size')

    # Apply it to _list_files calls
    list_files_mock.side_effect = [ref_list, src_list]

    # Build the prerequisites
    server = build_real_server(
        global_conf={'barman_home': tmpdir.mkdir('home').strpath})
    config = server.config
    executor = server.backup_manager.executor

    # Create the RsyncCopyController putting the safe_horizon between
    # the tmp/safe and tmp2/check timestamps
    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=datetime(year=2015, month=2, day=20,
                              hour=19, minute=0, second=0,
                              tzinfo=dateutil.tz.tzlocal()))

    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28)
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)

    # Add a temp dir (usually created by the copy method)
    rcc.temp_dir = tmpdir.mkdir('tmp').strpath

    # Create an item to inspect
    item = _RsyncCopyItem(
        label='pgdata',
        src=':/pg/data/',
        dst=backup_info.get_data_directory(),
        is_directory=True,
        item_class=rcc.PGDATA_CLASS,
        optional=False)

    # Then run the _analyze_directory method
    rcc._analyze_directory(item)

    # Verify that _rsync_factory has been called correctly
    assert rsync_factory_mock.mock_calls == [
        mock.call(item),
    ]

    # Verify that _list_files has been called correctly
    assert list_files_mock.mock_calls == [
        mock.call(rsync_factory_mock.return_value,
                  backup_info.get_data_directory() + '/'),
        mock.call(rsync_factory_mock.return_value, ':/pg/data/')
    ]

    # Check the result
    # 1) The list of directories should be there and should contain all
    # the directories
    assert item.dir_file
    assert open(item.dir_file).read() == (
        '.\n'
        'tmp\n')
    # 2) The exclude_and_protect file should be populated correctly with
    # all the files in the source
    assert item.exclude_and_protect_file
    assert open(item.exclude_and_protect_file).read() == (
        'P /tmp/safe\n'
        '- /tmp/safe\n'
        'P /tmp/check\n'
        '- /tmp/check\n'
        'P /tmp/diff_time\n'
        '- /tmp/diff_time\n'
        'P /tmp/diff_size\n'
        '- /tmp/diff_size\n'
        'P /tmp/new\n'
        '- /tmp/new\n')
    # 3) The check list must contain identical files after the
    # safe_horizon
    assert len(item.check_list) == 1
    assert item.check_list[0].path == 'tmp/check'
    # 4) The safe list must contain every file that is not in check and
    # is present in the source
    assert len(item.safe_list) == 4
    assert item.safe_list[0].path == 'tmp/safe'
    assert item.safe_list[1].path == 'tmp/diff_time'
    assert item.safe_list[2].path == 'tmp/diff_size'
    assert item.safe_list[3].path == 'tmp/new'
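# The expected exclude_and_protect file above pairs a "P" (protect) rule
# with a "-" (exclude) rule for every source file, which is plain rsync
# filter syntax. A small sketch that produces the same layout for a list
# of relative paths (an illustrative helper, not the controller's code):
def write_exclude_and_protect_filter(paths, filter_path):
    """Write an rsync filter file protecting and excluding each path."""
    with open(filter_path, "w") as filter_file:
        for path in paths:
            # "P" keeps the file from being deleted on the receiver,
            # "-" prevents it from being transferred
            filter_file.write("P /%s\n" % path)
            filter_file.write("- /%s\n" % path)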
def test_create_dir_and_purge(self, rsync_ignore_mock, rsync_factory_mock,
                              tmpdir):
    """
    Unit test for RsyncCopyController._create_dir_and_purge's code
    """
    # Build the prerequisites
    server = build_real_server(
        global_conf={'barman_home': tmpdir.mkdir('home').strpath})
    config = server.config
    executor = server.backup_manager.executor

    # Create the RsyncCopyController putting the safe_horizon between
    # the tmp/safe and tmp2/check timestamps
    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=datetime(year=2015, month=2, day=20,
                              hour=19, minute=0, second=0,
                              tzinfo=dateutil.tz.tzlocal()))

    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28)
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)

    # Create an item to inspect
    item = _RsyncCopyItem(
        label='pgdata',
        src=':/pg/data/',
        dst=backup_info.get_data_directory(),
        is_directory=True,
        item_class=rcc.PGDATA_CLASS,
        optional=False)

    # Then run the _create_dir_and_purge method
    rcc._create_dir_and_purge(item)

    # Verify that _rsync_factory has been called correctly
    assert rsync_factory_mock.mock_calls == [
        mock.call(item),
    ]

    # Verify that _rsync_ignore_vanished_files has been called correctly
    assert rsync_ignore_mock.mock_calls == [
        mock.call(rsync_factory_mock.return_value,
                  '--recursive', '--delete',
                  '--files-from=None', '--filter', 'merge None',
                  ':/pg/data/',
                  backup_info.get_data_directory(),
                  check=True),
    ]
def test_generate_recovery_conf(self, rsync_pg_mock, tmpdir):
    """
    Test the generation of recovery configuration
    :type tmpdir: py.path.local
    """
    # Build basic folder/files structure
    recovery_info = {
        'configuration_files': ['postgresql.conf', 'postgresql.auto.conf'],
        'tempdir': tmpdir.strpath,
        'results': {'changes': [], 'warnings': []},
        'get_wal': False,
    }
    backup_info = testing_helpers.build_test_backup_info(
        version=120000,
    )
    dest = tmpdir.mkdir('destination')

    # Build a recovery executor using a real server
    server = testing_helpers.build_real_server()
    executor = RecoveryExecutor(server.backup_manager)
    executor._generate_recovery_conf(recovery_info, backup_info,
                                     dest.strpath, True, True,
                                     'remote@command', 'target_name',
                                     '2015-06-03 16:11:03.71038+02', '2',
                                     '', '', None)

    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # check for contents
    assert 'recovery_end_command' in pg_auto_conf
    assert 'recovery_target_time' in pg_auto_conf
    assert 'recovery_target_timeline' in pg_auto_conf
    assert 'recovery_target_xid' not in pg_auto_conf
    assert 'recovery_target_lsn' not in pg_auto_conf
    assert 'recovery_target_name' in pg_auto_conf
    assert 'recovery_target' in pg_auto_conf
    assert pg_auto_conf['recovery_end_command'] == "'rm -fr barman_wal'"
    assert pg_auto_conf['recovery_target_time'] == \
        "'2015-06-03 16:11:03.71038+02'"
    assert pg_auto_conf['recovery_target_timeline'] == '2'
    assert pg_auto_conf['recovery_target_name'] == "'target_name'"

    # Test 'pause_at_recovery_target' recovery_info entry
    signal_file.remove()
    recovery_info['pause_at_recovery_target'] = 'on'
    executor._generate_recovery_conf(recovery_info, backup_info,
                                     dest.strpath, True, True,
                                     'remote@command', 'target_name',
                                     '2015-06-03 16:11:03.71038+02', '2',
                                     '', '', None)
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # Finally check pause_at_recovery_target value
    assert pg_auto_conf['pause_at_recovery_target'] == "'on'"

    # Test 'recovery_target_action'
    signal_file.remove()
    del recovery_info['pause_at_recovery_target']
    recovery_info['recovery_target_action'] = 'pause'
    executor._generate_recovery_conf(recovery_info, backup_info,
                                     dest.strpath, True, True,
                                     'remote@command', 'target_name',
                                     '2015-06-03 16:11:03.71038+02', '2',
                                     '', '', None)
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # Finally check recovery_target_action value
    assert pg_auto_conf['recovery_target_action'] == "'pause'"

    # Test 'standby_mode'
    signal_file.remove()
    executor._generate_recovery_conf(recovery_info, backup_info,
                                     dest.strpath, True, True,
                                     'remote@command', 'target_name',
                                     '2015-06-03 16:11:03.71038+02', '2',
                                     '', '', True)
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the recovery.signal file doesn't exist
    wrong_signal_file = tmpdir.join("recovery.signal")
    assert not wrong_signal_file.check()
    # Check that the standby.signal file exists
    signal_file = tmpdir.join("standby.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # standby_mode is not a valid configuration in PostgreSQL 12
    assert 'standby_mode' not in pg_auto_conf

    signal_file.remove()
    executor._generate_recovery_conf(recovery_info, backup_info,
                                     dest.strpath, True, True,
                                     'remote@command', 'target_name',
                                     '2015-06-03 16:11:03.71038+02', '2',
                                     '', '', False)
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the standby.signal file doesn't exist
    wrong_signal_file = tmpdir.join("standby.signal")
    assert not wrong_signal_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # standby_mode is not a valid configuration in PostgreSQL 12
    assert 'standby_mode' not in pg_auto_conf

    signal_file.remove()
    executor._generate_recovery_conf(recovery_info, backup_info,
                                     dest.strpath, True, True,
                                     'remote@command', 'target_name',
                                     '2015-06-03 16:11:03.71038+02', '2',
                                     '', '', None)
    # Check that the recovery.conf file doesn't exist
    recovery_conf_file = tmpdir.join("recovery.conf")
    assert not recovery_conf_file.check()
    # Check that the standby.signal file doesn't exist
    wrong_signal_file = tmpdir.join("standby.signal")
    assert not wrong_signal_file.check()
    # Check that the recovery.signal file exists
    signal_file = tmpdir.join("recovery.signal")
    assert signal_file.check()
    # Parse the generated recovery configuration
    pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
    # standby_mode is not a valid configuration in PostgreSQL 12
    assert 'standby_mode' not in pg_auto_conf
def test_full_copy(self, signal_mock, tempfile_mock, copy_mock,
                   create_and_purge_mock, analyse_mock, rsync_mock,
                   tmpdir):
    """
    Test the execution of a full copy
    """
    # Build the prerequisites
    tempdir = tmpdir.mkdir('tmp')
    tempfile_mock.return_value = tempdir.strpath
    server = build_real_server(
        global_conf={'barman_home': tmpdir.mkdir('home').strpath})
    config = server.config
    executor = server.backup_manager.executor

    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=None)

    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28)
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)

    # Silence the access to result properties
    rsync_mock.return_value.out = ''
    rsync_mock.return_value.err = ''
    rsync_mock.return_value.ret = 0

    # Mock analyze directory
    def analyse_func(item):
        label = item.label
        item.dir_file = label + '_dir_file'
        item.exclude_and_protect_file = label + '_exclude_and_protect_file'
        item.safe_list = [_FileItem('mode', 1, 'date', 'path')]
        item.check_list = [_FileItem('mode', 1, 'date', 'path')]
    analyse_mock.side_effect = analyse_func

    rcc.add_directory(
        label='tbs1',
        src=':/fake/location/',
        dst=backup_info.get_data_directory(16387),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS)
    rcc.add_directory(
        label='tbs2',
        src=':/another/location/',
        dst=backup_info.get_data_directory(16405),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS)
    rcc.add_directory(
        label='pgdata',
        src=':/pg/data/',
        dst=backup_info.get_data_directory(),
        reuse=None,
        bwlimit=None,
        item_class=rcc.PGDATA_CLASS,
        exclude=['/pg_xlog/*', '/pg_log/*',
                 '/recovery.conf', '/postmaster.pid'],
        exclude_and_protect=['pg_tblspc/16387', 'pg_tblspc/16405'])
    rcc.add_file(
        label='pg_control',
        src=':/pg/data/global/pg_control',
        dst='%s/global/pg_control' % backup_info.get_data_directory(),
        item_class=rcc.PGCONTROL_CLASS)
    rcc.add_file(
        label='config_file',
        src=':/etc/postgresql.conf',
        dst=backup_info.get_data_directory(),
        item_class=rcc.CONFIG_CLASS,
        optional=False)

    rcc.copy()

    # Check the order of calls to the Rsync mock
    assert rsync_mock.mock_calls == [
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None, include=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None, include=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=['/pg_xlog/*', '/pg_log/*',
                           '/recovery.conf', '/postmaster.pid'],
                  exclude_and_protect=['pg_tblspc/16387',
                                       'pg_tblspc/16405'],
                  include=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None, include=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call()(':/etc/postgresql.conf',
                    backup_info.get_data_directory(),
                    allowed_retval=(0, 23, 24)),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None, include=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call()(':/pg/data/global/pg_control',
                    '%s/global/pg_control' %
                    backup_info.get_data_directory(),
                    allowed_retval=(0, 23, 24)),
    ]

    # Check calls to _analyse_directory method
    assert analyse_mock.mock_calls == [
        mock.call(item) for item in rcc.item_list if item.is_directory
    ]

    # Check calls to _create_dir_and_purge method
    assert create_and_purge_mock.mock_calls == [
        mock.call(item) for item in rcc.item_list if item.is_directory
    ]

    # Utility function to build the file_list name
    def file_list_name(label, kind):
        return '%s/%s_%s_%s.list' % (tempdir.strpath, label, kind,
                                     os.getpid())

    # Check the order of calls to the copy method
    # All the file_list arguments are None because the analyze part
    # has not really been executed
    assert copy_mock.mock_calls == [
        mock.call(mock.ANY, ':/fake/location/',
                  backup_info.get_data_directory(16387),
                  checksum=False,
                  file_list=file_list_name('tbs1', 'safe')),
        mock.call(mock.ANY, ':/fake/location/',
                  backup_info.get_data_directory(16387),
                  checksum=True,
                  file_list=file_list_name('tbs1', 'check')),
        mock.call(mock.ANY, ':/another/location/',
                  backup_info.get_data_directory(16405),
                  checksum=False,
                  file_list=file_list_name('tbs2', 'safe')),
        mock.call(mock.ANY, ':/another/location/',
                  backup_info.get_data_directory(16405),
                  checksum=True,
                  file_list=file_list_name('tbs2', 'check')),
        mock.call(mock.ANY, ':/pg/data/',
                  backup_info.get_data_directory(),
                  checksum=False,
                  file_list=file_list_name('pgdata', 'safe')),
        mock.call(mock.ANY, ':/pg/data/',
                  backup_info.get_data_directory(),
                  checksum=True,
                  file_list=file_list_name('pgdata', 'check')),
    ]
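# The allowed_retval=(0, 23, 24) tuple asserted above means the copy
# tolerates rsync exit code 23 (partial transfer due to error) and 24
# (source files vanished during the transfer), which are expected when
# copying a live data directory. A sketch of such a guard, assuming a
# runner that returns the rsync exit code (illustrative only):
def run_rsync_tolerating_vanished(run_rsync, *args):
    """Run rsync, accepting partial-transfer results as success."""
    ret = run_rsync(*args)
    if ret not in (0, 23, 24):
        raise RuntimeError("rsync failed with exit code %s" % ret)
    return ret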
def test_status(self, capsys, tmpdir):
    """
    Test the sync_status method.

    Given a test xlog.db, expect the method to produce a json output.
    Compare the produced json with the EXPECTED_MINIMAL map

    :param path tmpdir: py.test temporary directory unique to the test
    :param capsys: fixture that allows access to stdout/stderr output
    """
    # Create a test xlog.db
    tmp_path = tmpdir.join("xlog.db")
    tmp_path.write(
        "000000010000000000000001\t16777216\t1406019022.4\tNone\n"
        "000000010000000000000002\t16777216\t1406019026.0\tNone\n"
        "000000010000000000000003\t16777216\t1406019026.0\tNone\n"
        "000000010000000000000004\t16777216\t1406019329.93\tNone\n"
        "000000010000000000000005\t16777216\t1406019330.84\tNone\n"
    )
    # Build a server, replacing some functions to use the tmpdir objects
    server = build_real_server()
    server.xlogdb = lambda: tmp_path.open()
    server.get_available_backups = lambda: {
        "1234567890": build_test_backup_info(
            server=server,
            begin_time=dateutil.parser.parse("Wed Jul 23 11:00:43 2014"),
            end_time=dateutil.parser.parse("Wed Jul 23 12:00:43 2014"),
        )
    }
    # Call the sync_status method capturing the output using capsys
    server.sync_status(None, None)
    (out, err) = capsys.readouterr()
    # prepare the expected results
    # (complex values have to be converted to json)
    expected = dict(EXPECTED_MINIMAL)
    expected["config"] = dict(
        [
            (k, v.to_json() if hasattr(v, "to_json") else v)
            for k, v in server.config.to_json().items()
        ]
    )
    assert json.loads(out) == expected
    # Test that the sync_status method raises a SyncError
    # if last_wal is older than the first entry of the xlog.db
    with pytest.raises(SyncError):
        server.sync_status("000000010000000000000000")
    # Test that the sync_status method raises a SyncError
    # if last_wal is newer than the last entry of the xlog.db
    with pytest.raises(SyncError):
        server.sync_status("000000010000000000000007")
    # test with an empty file
    tmp_path.write("")
    server.sync_status("000000010000000000000001")
    (out, err) = capsys.readouterr()
    result = json.loads(out)
    assert result["last_position"] == 0
    assert result["last_name"] == ""
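# The xlog.db fixture above is a sequence of tab-separated records:
# WAL name, size, timestamp and compression. A minimal parser sketch for
# one record, assuming this four-field layout (the real parsing belongs
# to WalFileInfo.from_xlogdb_line, used in the next test):
def parse_xlogdb_line(line):
    """Split one xlog.db record into its four fields."""
    name, size, time, compression = line.strip().split("\t")
    return {
        "name": name,
        "size": int(size),
        "time": float(time),
        "compression": None if compression == "None" else compression,
    }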
def test_recover_xlog(self, rsync_pg_mock, cm_mock, tmpdir):
    """
    Test the recovery of the xlogs of a backup

    :param rsync_pg_mock: Mock rsync object for the purpose of this test
    """
    # Build basic folders/files structure
    dest = tmpdir.mkdir('destination')
    wals = tmpdir.mkdir('wals')
    # Create 3 WAL files with different compressions
    xlog_dir = wals.mkdir(xlog.hash_dir('000000000000000000000002'))
    xlog_plain = xlog_dir.join('000000000000000000000001')
    xlog_gz = xlog_dir.join('000000000000000000000002')
    xlog_bz2 = xlog_dir.join('000000000000000000000003')
    xlog_plain.write('dummy content')
    xlog_gz.write('dummy content gz')
    xlog_bz2.write('dummy content bz2')
    server = testing_helpers.build_real_server(
        main_conf={'wals_directory': wals.strpath})
    # Prepare compressors mock
    c = {
        'gzip': mock.Mock(name='gzip'),
        'bzip2': mock.Mock(name='bzip2'),
    }
    cm_mock.return_value.get_compressor = \
        lambda compression=None: c[compression]
    # touch destination files to avoid errors on cleanup
    c['gzip'].decompress.side_effect = lambda src, dst: open(dst, 'w')
    c['bzip2'].decompress.side_effect = lambda src, dst: open(dst, 'w')
    # Build executor
    executor = RecoveryExecutor(server.backup_manager)

    # Test: local copy
    required_wals = (
        WalFileInfo.from_xlogdb_line(
            '000000000000000000000001\t42\t43\tNone\n'),
        WalFileInfo.from_xlogdb_line(
            '000000000000000000000002\t42\t43\tgzip\n'),
        WalFileInfo.from_xlogdb_line(
            '000000000000000000000003\t42\t43\tbzip2\n'),
    )
    executor._xlog_copy(required_wals, dest.strpath, None)
    # Check for a correct invocation of rsync using local paths
    rsync_pg_mock.assert_called_once_with(
        network_compression=False, bwlimit=None, path=None, ssh=None)
    assert not rsync_pg_mock.return_value.from_file_list.called
    c['gzip'].decompress.assert_called_once_with(xlog_gz.strpath,
                                                 mock.ANY)
    c['bzip2'].decompress.assert_called_once_with(xlog_bz2.strpath,
                                                  mock.ANY)

    # Reset mock calls
    rsync_pg_mock.reset_mock()
    c['gzip'].reset_mock()
    c['bzip2'].reset_mock()

    # Test: remote copy
    executor._xlog_copy(required_wals, dest.strpath, 'remote_command')
    # Check for the invocation of rsync on a remote call
    rsync_pg_mock.assert_called_once_with(
        network_compression=False, bwlimit=None,
        path=mock.ANY, ssh='remote_command')
    rsync_pg_mock.return_value.from_file_list.assert_called_once_with(
        ['000000000000000000000001',
         '000000000000000000000002',
         '000000000000000000000003'],
        mock.ANY, mock.ANY)
    c['gzip'].decompress.assert_called_once_with(xlog_gz.strpath,
                                                 mock.ANY)
    c['bzip2'].decompress.assert_called_once_with(xlog_bz2.strpath,
                                                  mock.ANY)
def test_recovery(self, remote_cmd_mock, rsync_pg_mock,
                  copy_controller_mock, tmpdir):
    """
    Test the execution of a recovery
    """
    # Prepare basic directory/files structure
    dest = tmpdir.mkdir('destination')
    base = tmpdir.mkdir('base')
    wals = tmpdir.mkdir('wals')
    backup_info = testing_helpers.build_test_backup_info(tablespaces=[])
    backup_info.config.basebackups_directory = base.strpath
    backup_info.config.wals_directory = wals.strpath
    backup_info.version = 90400
    datadir = base.mkdir(backup_info.backup_id).mkdir('data')
    backup_info.pgdata = datadir.strpath
    postgresql_conf_local = datadir.join('postgresql.conf')
    postgresql_auto_local = datadir.join('postgresql.auto.conf')
    postgresql_conf_local.write('archive_command = something\n'
                                'data_directory = something')
    postgresql_auto_local.write('archive_command = something\n'
                                'data_directory = something')
    shutil.copy2(postgresql_conf_local.strpath, dest.strpath)
    shutil.copy2(postgresql_auto_local.strpath, dest.strpath)
    # Avoid triggering warning for missing config files
    datadir.ensure('pg_hba.conf')
    datadir.ensure('pg_ident.conf')
    # Build an executor
    server = testing_helpers.build_real_server(
        global_conf={
            "barman_lock_directory": tmpdir.mkdir('lock').strpath
        },
        main_conf={
            "wals_directory": wals.strpath
        })
    executor = RecoveryExecutor(server.backup_manager)

    # test local recovery
    with closing(executor):
        rec_info = executor.recover(backup_info, dest.strpath,
                                    exclusive=True)
    # remove not useful keys from the result
    del rec_info['cmd']
    sys_tempdir = rec_info['tempdir']
    assert rec_info == {
        'rsync': None,
        'tempdir': sys_tempdir,
        'wal_dest': dest.join('pg_xlog').strpath,
        'recovery_dest': 'local',
        'destination_path': dest.strpath,
        'temporary_configuration_files': [
            dest.join('postgresql.conf').strpath,
            dest.join('postgresql.auto.conf').strpath],
        'results': {
            'delete_barman_wal': False,
            'recovery_start_time': rec_info['results'][
                'recovery_start_time'
            ],
            'get_wal': False,
            'changes': [
                Assertion._make([
                    'postgresql.conf', 0, 'archive_command', 'false']),
                Assertion._make([
                    'postgresql.auto.conf', 0, 'archive_command',
                    'false'])],
            'missing_files': [],
            'recovery_configuration_file': 'recovery.conf',
            'warnings': [
                Assertion._make([
                    'postgresql.conf', 2, 'data_directory',
                    'something']),
                Assertion._make([
                    'postgresql.auto.conf', 2, 'data_directory',
                    'something'])]},
        'target_epoch': None,
        'configuration_files': [
            'postgresql.conf', 'postgresql.auto.conf'],
        'target_datetime': None,
        'safe_horizon': None,
        'is_pitr': False,
        'get_wal': False,
    }

    # test remote recovery
    with closing(executor):
        rec_info = executor.recover(backup_info, dest.strpath,
                                    remote_command="remote@command",
                                    exclusive=True)
    # remove not useful keys from the result
    del rec_info['cmd']
    del rec_info['rsync']
    sys_tempdir = rec_info['tempdir']
    assert rec_info == {
        'tempdir': sys_tempdir,
        'wal_dest': dest.join('pg_xlog').strpath,
        'recovery_dest': 'remote',
        'destination_path': dest.strpath,
        'temporary_configuration_files': [
            os.path.join(sys_tempdir, 'postgresql.conf'),
            os.path.join(sys_tempdir, 'postgresql.auto.conf')],
        'results': {
            'delete_barman_wal': False,
            'get_wal': False,
            'recovery_start_time': rec_info['results'][
                'recovery_start_time'
            ],
            'changes': [
                Assertion._make([
                    'postgresql.conf', 0, 'archive_command', 'false']),
                Assertion._make([
                    'postgresql.auto.conf', 0, 'archive_command',
                    'false'])],
            'missing_files': [],
            'recovery_configuration_file': 'recovery.conf',
            'warnings': [
                Assertion._make([
                    'postgresql.conf', 2, 'data_directory',
                    'something']),
                Assertion._make([
                    'postgresql.auto.conf', 2, 'data_directory',
                    'something'])]},
        'target_epoch': None,
        'configuration_files': [
            'postgresql.conf', 'postgresql.auto.conf'],
        'target_datetime': None,
        'safe_horizon': None,
        'is_pitr': False,
        'get_wal': False,
    }

    # test failed rsync
    rsync_pg_mock.side_effect = CommandFailedException()
    with pytest.raises(CommandFailedException):
        with closing(executor):
            executor.recover(backup_info, dest.strpath,
                             exclusive=True,
                             remote_command="remote@command")
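# Assertion above is a named tuple describing one configuration line
# detected as changed or suspicious during recovery. A sketch of a
# compatible definition, assuming the four fields used in the expected
# results (file name, line number, option key, option value); the field
# names are an assumption for illustration:
from collections import namedtuple

Assertion = namedtuple("Assertion", ["filename", "line", "key", "value"])

# Example: Assertion._make(['postgresql.conf', 0, 'archive_command',
# 'false']) builds the same record as the expected 'changes' entries.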
def test_full_copy(self, smart_copy_mock, rsync_mock, tmpdir):
    """
    Test the execution of a rsync copy

    :param rsync_mock: mock for the rsync command
    :param tmpdir: temporary dir
    """
    # Build the prerequisites
    server = build_real_server(
        global_conf={'barman_home': tmpdir.mkdir('home').strpath})
    config = server.config
    executor = server.backup_manager.executor

    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=None)

    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28)
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)

    # Silence the access to result properties
    rsync_mock.return_value.out = ''
    rsync_mock.return_value.err = ''
    rsync_mock.return_value.ret = 0

    rcc.add_directory(
        label='tbs1',
        src=':/fake/location/',
        dst=backup_info.get_data_directory(16387),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS)
    rcc.add_directory(
        label='tbs2',
        src=':/another/location/',
        dst=backup_info.get_data_directory(16405),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS)
    rcc.add_directory(
        label='pgdata',
        src=':/pg/data/',
        dst=backup_info.get_data_directory(),
        reuse=None,
        bwlimit=None,
        item_class=rcc.PGDATA_CLASS,
        exclude=['/pg_xlog/*', '/pg_log/*',
                 '/recovery.conf', '/postmaster.pid'],
        exclude_and_protect=['pg_tblspc/16387', 'pg_tblspc/16405'])
    rcc.add_file(
        label='pg_control',
        src=':/pg/data/global/pg_control',
        dst='%s/global/pg_control' % backup_info.get_data_directory(),
        item_class=rcc.PGCONTROL_CLASS)
    rcc.add_file(
        label='config_file',
        src=':/etc/postgresql.conf',
        dst=backup_info.get_data_directory(),
        item_class=rcc.CONFIG_CLASS,
        optional=False)

    rcc.copy()

    assert rsync_mock.mock_calls == [
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=['/pg_xlog/*', '/pg_log/*',
                           '/recovery.conf', '/postmaster.pid'],
                  exclude_and_protect=['pg_tblspc/16387',
                                       'pg_tblspc/16405'],
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call()(':/pg/data/global/pg_control',
                    '%s/global/pg_control' %
                    backup_info.get_data_directory(),
                    allowed_retval=(0, 23, 24)),
        mock.call(network_compression=False,
                  args=['--itemize-changes', '--itemize-changes'],
                  bwlimit=None, ssh='ssh', path=None,
                  ssh_options=['-c', '"arcfour"', '-p', '22',
                               '*****@*****.**', '-o', 'BatchMode=yes',
                               '-o', 'StrictHostKeyChecking=no'],
                  exclude=None, exclude_and_protect=None,
                  retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
        mock.call()(':/etc/postgresql.conf',
                    backup_info.get_data_directory(),
                    allowed_retval=(0, 23, 24)),
    ]

    assert smart_copy_mock.mock_calls == [
        mock.call(mock.ANY, ':/fake/location/',
                  backup_info.get_data_directory(16387), None, None),
        mock.call(mock.ANY, ':/another/location/',
                  backup_info.get_data_directory(16405), None, None),
        mock.call(mock.ANY, ':/pg/data/',
                  backup_info.get_data_directory(), None, None),
    ]
def test_sync_backup(self, logger_mock, rsync_mock, tmpdir, capsys):
    """
    Test the synchronisation method, testing all the possible
    error conditions.

    :param MagicMock logger_mock: MagicMock obj mimicking the logger
    :param MagicMock rsync_mock: MagicMock replacing the Rsync class
    :param py.local.path tmpdir: py.test temporary directory
    :param capsys: fixture that allows access to stdout/stderr output
    """
    backup_name = '1234567890'
    server_name = 'main'

    # Prepare paths
    backup_dir = tmpdir.mkdir(server_name)
    basebackup_dir = backup_dir.mkdir("base")
    full_backup_path = basebackup_dir.mkdir(backup_name)
    primary_info_file = backup_dir.join(barman.server.PRIMARY_INFO_FILE)

    # prepare the primary_info file
    remote_basebackup_dir = tmpdir.mkdir("primary")
    primary_info_content = dict(EXPECTED_MINIMAL)
    primary_info_content['config'].update(
        basebackups_directory=str(remote_basebackup_dir))
    primary_info_file.write(json.dumps(primary_info_content))

    # Test 1: Not a passive node.
    # Expect SyncError
    server = build_real_server(
        global_conf={'barman_lock_directory': tmpdir.strpath},
        main_conf={'backup_directory': backup_dir.strpath})
    with pytest.raises(SyncError):
        server.sync_backup(backup_name)

    # Test 2: normal sync execution, no error expected.
    # Test for all the steps on the logger
    logger_mock.reset_mock()
    server = build_real_server(
        global_conf={'barman_lock_directory': tmpdir.strpath},
        main_conf={
            'backup_directory': backup_dir.strpath,
            'primary_ssh_command': 'ssh fakeuser@fakehost'
        })
    server.sync_backup(backup_name)
    logger_mock.info.assert_any_call(
        "Synchronising with server %s backup %s: step 1/3: "
        "parse server information", server_name, backup_name)
    logger_mock.info.assert_any_call(
        "Synchronising with server %s backup %s: step 2/3: "
        "file copy", server_name, backup_name)
    logger_mock.info.assert_any_call(
        "Synchronising with server %s backup %s: step 3/3: "
        "finalise sync", server_name, backup_name)

    # Test 3: test rsync failure.
    # Expect a BackupInfo object with status "FAILED"
    # and an error message in the "error" field of the obj
    rsync_mock.reset_mock()
    server.backup_manager._backup_cache = {}
    rsync_mock.side_effect = CommandFailedException("TestFailure")
    full_backup_path.remove(rec=1)
    server.sync_backup(backup_name)
    backup_info = server.get_backup(backup_name)
    assert backup_info.status == BackupInfo.FAILED
    assert backup_info.error == 'failure syncing server main ' \
                                'backup 1234567890: TestFailure'

    # Test 4: test KeyboardInterrupt management.
    # Check the error message for the KeyboardInterrupt event
    rsync_mock.reset_mock()
    rsync_mock.side_effect = CommandFailedException("TestFailure")
    full_backup_path.remove(rec=1)
    rsync_mock.side_effect = KeyboardInterrupt()
    server.sync_backup(backup_name)
    backup_info = server.get_backup(backup_name)
    assert backup_info.status == BackupInfo.FAILED
    assert backup_info.error == 'failure syncing server main ' \
                                'backup 1234567890: KeyboardInterrupt'

    # Test 5: test backup name not present on the master server.
    # Expect an error message on stderr
    rsync_mock.reset_mock()
    rsync_mock.side_effect = CommandFailedException("TestFailure")
    full_backup_path.remove(rec=1)
    server.sync_backup('wrong_backup_name')
    (out, err) = capsys.readouterr()
    # Check the stderr using capsys. We need only the first line
    # from stderr
    e = err.split('\n')
    assert 'ERROR: failure syncing server main ' \
           'backup 1234567890: TestFailure' in e

    # Test 6: backup already synced.
    # Check for the warning message on stdout using capsys
    rsync_mock.reset_mock()
    rsync_mock.side_effect = None
    # do it the first time and check it succeeded
    server.sync_backup(backup_name)
    backup_info = server.get_backup(backup_name)
    assert backup_info.status == BackupInfo.DONE
    # do it again and test it does not call rsync
    rsync_mock.reset_mock()
    server.sync_backup(backup_name)
    assert not rsync_mock.called
    (out, err) = capsys.readouterr()
    assert out.strip() == 'Backup 1234567890 is already' \
                          ' synced with main server'
def test_full_copy(
    self, signal_mock, tempfile_mock, copy_mock,
    create_and_purge_mock, analyse_mock, rsync_mock, tmpdir,
):
    """
    Test the execution of a full copy
    """
    # Build the prerequisites
    tempdir = tmpdir.mkdir("tmp")
    tempfile_mock.return_value = tempdir.strpath
    server = build_real_server(
        global_conf={"barman_home": tmpdir.mkdir("home").strpath})
    config = server.config
    executor = server.backup_manager.executor

    rcc = RsyncCopyController(
        path=server.path,
        ssh_command=executor.ssh_command,
        ssh_options=executor.ssh_options,
        network_compression=config.network_compression,
        reuse_backup=None,
        safe_horizon=None,
    )

    backup_info = build_test_backup_info(
        server=server,
        pgdata="/pg/data",
        config_file="/etc/postgresql.conf",
        hba_file="/pg/data/pg_hba.conf",
        ident_file="/pg/data/pg_ident.conf",
        begin_xlog="0/2000028",
        begin_wal="000000010000000000000002",
        begin_offset=28,
    )
    backup_info.save()
    # This is to check that all the preparation is done correctly
    assert os.path.exists(backup_info.filename)

    # Silence the access to result properties
    rsync_mock.return_value.out = ""
    rsync_mock.return_value.err = ""
    rsync_mock.return_value.ret = 0

    # Mock analyze directory
    def analyse_func(item):
        label = item.label
        item.dir_file = label + "_dir_file"
        item.exclude_and_protect_file = label + "_exclude_and_protect_file"
        item.safe_list = [_FileItem("mode", 1, "date", "path")]
        item.check_list = [_FileItem("mode", 1, "date", "path")]

    analyse_mock.side_effect = analyse_func

    rcc.add_directory(
        label="tbs1",
        src=":/fake/location/",
        dst=backup_info.get_data_directory(16387),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS,
    )
    rcc.add_directory(
        label="tbs2",
        src=":/another/location/",
        dst=backup_info.get_data_directory(16405),
        reuse=None,
        bwlimit=None,
        item_class=rcc.TABLESPACE_CLASS,
    )
    rcc.add_directory(
        label="pgdata",
        src=":/pg/data/",
        dst=backup_info.get_data_directory(),
        reuse=None,
        bwlimit=None,
        item_class=rcc.PGDATA_CLASS,
        exclude=[
            "/pg_xlog/*",
            "/pg_log/*",
            "/log/*",
            "/recovery.conf",
            "/postmaster.pid",
        ],
        exclude_and_protect=["pg_tblspc/16387", "pg_tblspc/16405"],
    )
    rcc.add_file(
        label="pg_control",
        src=":/pg/data/global/pg_control",
        dst="%s/global/pg_control" % backup_info.get_data_directory(),
        item_class=rcc.PGCONTROL_CLASS,
    )
    rcc.add_file(
        label="config_file",
        src=":/etc/postgresql.conf",
        dst=backup_info.get_data_directory(),
        item_class=rcc.CONFIG_CLASS,
        optional=False,
    )

    rcc.copy()

    # Check the order of calls to the Rsync mock
    assert rsync_mock.mock_calls == [
        mock.call(
            network_compression=False,
            args=["--ignore-missing-args",
                  "--itemize-changes", "--itemize-changes"],
            bwlimit=None,
            ssh="ssh",
            path=None,
            ssh_options=["-c", '"arcfour"', "-p", "22",
                         "*****@*****.**",
                         "-o", "BatchMode=yes",
                         "-o", "StrictHostKeyChecking=no"],
            exclude=None,
            exclude_and_protect=None,
            include=None,
            retry_sleep=0,
            retry_times=0,
            retry_handler=mock.ANY,
        ),
        mock.call(
            network_compression=False,
            args=["--ignore-missing-args",
                  "--itemize-changes", "--itemize-changes"],
            bwlimit=None,
            ssh="ssh",
            path=None,
            ssh_options=["-c", '"arcfour"', "-p", "22",
                         "*****@*****.**",
                         "-o", "BatchMode=yes",
                         "-o", "StrictHostKeyChecking=no"],
            exclude=None,
            exclude_and_protect=None,
            include=None,
            retry_sleep=0,
            retry_times=0,
            retry_handler=mock.ANY,
        ),
        mock.call(
            network_compression=False,
            args=["--ignore-missing-args",
                  "--itemize-changes", "--itemize-changes"],
            bwlimit=None,
            ssh="ssh",
            path=None,
            ssh_options=["-c", '"arcfour"', "-p", "22",
                         "*****@*****.**",
                         "-o", "BatchMode=yes",
                         "-o", "StrictHostKeyChecking=no"],
            exclude=[
                "/pg_xlog/*",
                "/pg_log/*",
                "/log/*",
                "/recovery.conf",
                "/postmaster.pid",
            ],
            exclude_and_protect=["pg_tblspc/16387", "pg_tblspc/16405"],
            include=None,
            retry_sleep=0,
            retry_times=0,
            retry_handler=mock.ANY,
        ),
        mock.call(
            network_compression=False,
            args=["--ignore-missing-args",
                  "--itemize-changes", "--itemize-changes"],
            bwlimit=None,
            ssh="ssh",
            path=None,
            ssh_options=["-c", '"arcfour"', "-p", "22",
                         "*****@*****.**",
                         "-o", "BatchMode=yes",
                         "-o", "StrictHostKeyChecking=no"],
            exclude=None,
            exclude_and_protect=None,
            include=None,
            retry_sleep=0,
            retry_times=0,
            retry_handler=mock.ANY,
        ),
        mock.call()(
            ":/etc/postgresql.conf",
            backup_info.get_data_directory(),
            allowed_retval=(0, 23, 24),
        ),
        mock.call(
            network_compression=False,
            args=["--ignore-missing-args",
                  "--itemize-changes", "--itemize-changes"],
            bwlimit=None,
            ssh="ssh",
            path=None,
            ssh_options=["-c", '"arcfour"', "-p", "22",
                         "*****@*****.**",
                         "-o", "BatchMode=yes",
                         "-o", "StrictHostKeyChecking=no"],
            exclude=None,
            exclude_and_protect=None,
            include=None,
            retry_sleep=0,
            retry_times=0,
            retry_handler=mock.ANY,
        ),
        mock.call()(
            ":/pg/data/global/pg_control",
            "%s/global/pg_control" % backup_info.get_data_directory(),
            allowed_retval=(0, 23, 24),
        ),
    ]

    # Check calls to _analyse_directory method
    assert analyse_mock.mock_calls == [
        mock.call(item) for item in rcc.item_list if item.is_directory
    ]

    # Check calls to _create_dir_and_purge method
    assert create_and_purge_mock.mock_calls == [
        mock.call(item) for item in rcc.item_list if item.is_directory
    ]

    # Utility function to build the file_list name
    def file_list_name(label, kind):
        return "%s/%s_%s_%s.list" % (tempdir.strpath, label, kind,
                                     os.getpid())

    # Check the order of calls to the copy method
    # All the file_list arguments are None because the analyze part
    # has not really been executed
    assert copy_mock.mock_calls == [
        mock.call(
            mock.ANY,
            ":/fake/location/",
            backup_info.get_data_directory(16387),
            checksum=False,
            file_list=file_list_name("tbs1", "safe"),
        ),
        mock.call(
            mock.ANY,
            ":/fake/location/",
            backup_info.get_data_directory(16387),
            checksum=True,
            file_list=file_list_name("tbs1", "check"),
        ),
        mock.call(
            mock.ANY,
            ":/another/location/",
            backup_info.get_data_directory(16405),
            checksum=False,
            file_list=file_list_name("tbs2", "safe"),
        ),
        mock.call(
            mock.ANY,
            ":/another/location/",
            backup_info.get_data_directory(16405),
            checksum=True,
            file_list=file_list_name("tbs2", "check"),
        ),
        mock.call(
            mock.ANY,
            ":/pg/data/",
            backup_info.get_data_directory(),
            checksum=False,
            file_list=file_list_name("pgdata", "safe"),
        ),
        mock.call(
            mock.ANY,
            ":/pg/data/",
            backup_info.get_data_directory(),
            checksum=True,
            file_list=file_list_name("pgdata", "check"),
        ),
    ]
def test_sync_wals(self, rsync_mock, tmpdir, capsys): """ Test the WAL synchronisation method, testing all the possible error conditions. :param MagicMock rsync_mock: MagicMock replacing Rsync class :param py.local.path tmpdir: py.test temporary directory :param capsys: fixture that allow to access stdout/stderr output """ server_name = 'main' # Prepare paths barman_home = tmpdir.mkdir("barman_home") backup_dir = barman_home.mkdir(server_name) wals_dir = backup_dir.mkdir("wals") primary_info_file = backup_dir.join(barman.server.PRIMARY_INFO_FILE) # prepare the primary_info file remote_basebackup_dir = tmpdir.mkdir("primary") primary_info_content = dict(EXPECTED_MINIMAL) primary_info_content['config'].update( compression=None, basebackups_directory=str(remote_basebackup_dir), wals_directory=str(wals_dir)) primary_info_file.write(json.dumps(primary_info_content)) # Test 1: Not a passive node. # Expect SyncError server = build_real_server(global_conf=dict( barman_home=str(barman_home))) with pytest.raises(SyncError): server.sync_wals() # Test 2: different compression between Master and Passive node. # Expect a SyncError server = build_real_server( global_conf=dict(barman_home=str(barman_home)), main_conf=dict(compression='gzip', primary_ssh_command='ssh fakeuser@fakehost')) server.sync_wals() (out, err) = capsys.readouterr() assert "Compression method on server %s " % server_name in err # Test 3: No base backup for server, exit with warning server = build_real_server( global_conf=dict(barman_home=str(barman_home)), main_conf=dict(compression=None, wals_directory=str(wals_dir), primary_ssh_command='ssh fakeuser@fakehost')) server.sync_wals() (out, err) = capsys.readouterr() assert 'WARNING: No base backup for ' \ 'server %s' % server.config.name in err # Test 4: No wal synchronisation required, expect a warning # set return for get_first_backup and get_backup methods server.get_first_backup_id = lambda: "too_new" server.get_backup = lambda x: build_test_backup_info( server=server, begin_wal='000000010000000000000005', begin_time=dateutil.parser.parse('Wed Jul 23 11:00:43 2014'), end_time=dateutil.parser.parse('Wed Jul 23 12:00:43 2014')) server.sync_wals() (out, err) = capsys.readouterr() assert 'WARNING: Skipping WAL synchronisation for ' \ 'server %s: no available local backup for %s' \ % (server.config.name, primary_info_content['wals'][0]['name']) in err # Test 6: simulate rsync failure. # Expect a custom error message server.get_backup = lambda x: build_test_backup_info( server=server, begin_wal='000000010000000000000002', begin_time=dateutil.parser.parse('Wed Jul 23 11:00:43 2014'), end_time=dateutil.parser.parse('Wed Jul 23 12:00:43 2014')) rsync_mock.side_effect = CommandFailedException("TestFailure") server.sync_wals() (out, err) = capsys.readouterr() # check stdout for the Custom error message assert 'TestFailure' in err # Test 7: simulate keyboard interruption rsync_mock.side_effect = KeyboardInterrupt() server.sync_wals() # control the error message for KeyboardInterrupt (out, err) = capsys.readouterr() assert 'KeyboardInterrupt' in err # Test 8: normal execution, expect no output. 
    # The xlog.db file must contain information about the WALs
    # listed in the primary info file

    # Reset the rsync_mock, and remove the side_effect
    rsync_mock.reset_mock()
    rsync_mock.side_effect = mock.Mock(name='rsync')

    server.sync_wals()
    # Check for no output on stdout and stderr
    (out, err) = capsys.readouterr()
    assert out == ''
    assert err == ''

    # Check the xlog.db content against the primary.info WALs
    exp_xlog = [
        '000000010000000000000002\t16777216\t1406019026.0\tNone\n',
        '000000010000000000000003\t16777216\t1406019026.0\tNone\n',
        '000000010000000000000004\t16777216\t1406019329.93\tNone\n',
        '000000010000000000000005\t16777216\t1406019330.84\tNone\n'
    ]
    with server.xlogdb() as fxlogdb:
        xlog = fxlogdb.readlines()
        assert xlog == exp_xlog
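
# A hypothetical helper, not used by the tests above, illustrating the
# xlog.db line layout asserted in exp_xlog: tab-separated
# "name size timestamp compression" fields, where the literal string
# "None" marks an uncompressed WAL file.
def _parse_xlogdb_line(line):
    name, size, timestamp, compression = line.rstrip('\n').split('\t')
    return {
        'name': name,
        'size': int(size),
        'time': float(timestamp),
        'compression': None if compression == 'None' else compression,
    }

# For example, the first expected line parses to:
# _parse_xlogdb_line('000000010000000000000002\t16777216\t1406019026.0\tNone\n')
# -> {'name': '000000010000000000000002', 'size': 16777216,
#     'time': 1406019026.0, 'compression': None}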
def test_check_replication_slot(self, postgres_mock, capsys):
    """
    Extension of the check_postgres test.
    Tests the replication_slot check

    :param postgres_mock: mock get_remote_status function
    :param capsys: retrieve output from console
    """
    postgres_mock.return_value = {
        'current_xlog': None,
        'archive_command': 'wal to archive',
        'pgespresso_installed': None,
        'server_txt_version': '9.3.1',
        'data_directory': '/usr/local/postgres',
        'archive_mode': 'on',
        'wal_level': 'replica',
        'replication_slot_support': False,
        'replication_slot': None,
    }

    # Create server
    server = build_real_server()

    # Case: Postgres version < 9.4, no replication slot check is performed
    strategy = CheckOutputStrategy()
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert '\treplication slot:' not in out

    # Case: correct configuration
    # Use a mock as a quick disposable obj
    rep_slot = mock.Mock()
    rep_slot.slot_name = 'test'
    rep_slot.active = True
    rep_slot.restart_lsn = 'aaaBB'
    postgres_mock.return_value = {
        'server_txt_version': '9.4.1',
        'replication_slot_support': True,
        'replication_slot': rep_slot,
    }
    server = build_real_server()
    server.config.streaming_archiver = True
    server.config.slot_name = 'test'
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    # Everything is ok
    assert '\treplication slot: OK\n' in out

    rep_slot.active = False
    rep_slot.restart_lsn = None
    postgres_mock.return_value = {
        'server_txt_version': '9.4.1',
        'replication_slot_support': True,
        'replication_slot': rep_slot,
    }
    # Case: replication slot not initialised, expect a failure
    server = build_real_server()
    server.config.slot_name = 'test'
    server.config.streaming_archiver = True
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert "\treplication slot: FAILED (slot '%s' not initialised: " \
           "is 'receive-wal' running?)\n" \
           % server.config.slot_name in out

    rep_slot.reset_mock()
    rep_slot.active = False
    rep_slot.restart_lsn = 'Test'
    postgres_mock.return_value = {
        'server_txt_version': '9.4.1',
        'replication_slot_support': True,
        'replication_slot': rep_slot,
    }
    # Case: replication slot not active, expect a failure
    server = build_real_server()
    server.config.slot_name = 'test'
    server.config.streaming_archiver = True
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert "\treplication slot: FAILED (slot '%s' not active: " \
           "is 'receive-wal' running?)\n" % server.config.slot_name in out

    rep_slot.reset_mock()
    rep_slot.active = False
    rep_slot.restart_lsn = 'Test'
    postgres_mock.return_value = {
        'server_txt_version': 'PostgreSQL 9.4.1',
        'replication_slot_support': True,
        'replication_slot': rep_slot,
    }
    # Case: replication slot initialised but not active, with
    # streaming_archiver off. Expect an OK with a warning
    server = build_real_server()
    server.config.slot_name = 'test'
    server.config.streaming_archiver = False
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert "\treplication slot: OK (WARNING: slot '%s' is initialised " \
           "but not required by the current config)\n" \
           % server.config.slot_name in out

    rep_slot.reset_mock()
    rep_slot.active = True
    rep_slot.restart_lsn = 'Test'
    postgres_mock.return_value = {
        'server_txt_version': 'PostgreSQL 9.4.1',
        'replication_slot_support': True,
        'replication_slot': rep_slot,
    }
    # Case: replication slot active, with streaming_archiver off.
    # Expect an OK with a warning
    server = build_real_server()
    server.config.slot_name = 'test'
    server.config.streaming_archiver = False
    server.check_postgres(strategy)
    (out, err) = capsys.readouterr()
    assert "\treplication slot: OK (WARNING: slot '%s' is active " \
           "but not required by the current config)\n" \
           % server.config.slot_name in out
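
# A minimal sketch, not part of the test suite, of the "mock as a quick
# disposable obj" pattern used above, assuming only the standard library's
# unittest.mock: a bare mock.Mock() accepts arbitrary attribute assignment,
# so it can stand in for a replication slot record without defining a class.
from unittest import mock


def _demo_disposable_slot():
    rep_slot = mock.Mock()
    rep_slot.slot_name = 'test'
    rep_slot.active = False
    rep_slot.restart_lsn = None
    # The code under test only reads these attributes, so the mock is a
    # sufficient stand-in for the real slot row object
    assert (rep_slot.active, rep_slot.restart_lsn) == (False, None)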