Example #1
0
 def execute(self, sql, *params):
     """Mock cursor execute: raise for error-probe statements, otherwise
     stage canned result rows on ``self.results``."""
     if sql == 'CHECKPOINT' or sql.startswith('blabla'):
         raise psycopg2.OperationalError()
     if sql.startswith('RetryFailedError'):
         raise RetryFailedError('retry')
     if sql.startswith('SELECT slot_name'):
         rows = [('blabla',), ('foobar',)]
     elif sql.startswith('SELECT pg_xlog_location_diff'):
         rows = [(0,)]
     elif sql == 'SELECT pg_is_in_recovery()':
         rows = [(False,)]
     elif sql.startswith('SELECT to_char(pg_postmaster_start_time'):
         rows = [('', True, '', '', '', '', False)]
     else:
         # default: one row of ten NULL columns
         rows = [(None,) * 10]
     self.results = rows
Example #2
0
 def execute(self, sql, *params):
     """Mock cursor execute: raise for error-probe statements, otherwise
     stage canned result rows on ``self.results``."""
     if sql.startswith('blabla'):
         raise psycopg2.ProgrammingError()
     if sql == 'CHECKPOINT':
         raise psycopg2.OperationalError()
     if sql.startswith('RetryFailedError'):
         raise RetryFailedError('retry')
     if sql.startswith('SELECT slot_name'):
         rows = [('blabla',), ('foobar',)]
     elif sql.startswith('SELECT CASE WHEN pg_is_in_recovery()'):
         rows = [(2,)]
     elif sql == 'SELECT pg_is_in_recovery()':
         rows = [(False,)]
     elif sql.startswith('WITH replication_info AS ('):
         info = ('[{"application_name":"walreceiver","client_addr":"1.2.3.4",'
                 '"state":"streaming","sync_state":"async","sync_priority":0}]')
         rows = [('', True, '', '', '', '', False, info)]
     elif sql.startswith('SELECT name, setting'):
         rows = [('wal_segment_size', '2048', '8kB', 'integer', 'internal'),
                 ('search_path', 'public', None, 'string', 'user'),
                 ('port', '5433', None, 'integer', 'postmaster'),
                 ('listen_addresses', '*', None, 'string', 'postmaster'),
                 ('autovacuum', 'on', None, 'bool', 'sighup'),
                 ('unix_socket_directories', '/tmp', None, 'string', 'postmaster')]
     elif sql.startswith('IDENTIFY_SYSTEM'):
         rows = [('1', 2, '0/402EEC0', '')]
     elif sql.startswith('TIMELINE_HISTORY '):
         history = (b'x\t0/40159C0\tno recovery target specified\n\n'
                    b'1\t0/40159C0\tno recovery target specified\n\n'
                    b'2\t0/402DD98\tno recovery target specified\n\n'
                    b'3\t0/403DD98\tno recovery target specified\n')
         rows = [('', history)]
     else:
         # default: one row of ten NULL columns
         rows = [(None,) * 10]
     self.results = rows
Example #3
0
 def _wait_caches(self):
     stop_time = time.time() + self._retry.deadline
     while not (self._pods.is_ready() and self._kinds.is_ready()):
         timeout = stop_time - time.time()
         if timeout <= 0:
             raise RetryFailedError('Exceeded retry deadline')
         self._condition.wait(timeout)
Example #4
0
 def execute(self, sql, *params):
     """Mock cursor execute: raise for error-probe statements, otherwise
     stage canned result rows on ``self.results``."""
     if sql == 'CHECKPOINT' or sql.startswith('blabla'):
         raise psycopg2.OperationalError()
     if sql.startswith('RetryFailedError'):
         raise RetryFailedError('retry')
     if sql.startswith('SELECT slot_name'):
         rows = [('blabla',), ('foobar',)]
     elif sql.startswith('SELECT pg_xlog_location_diff'):
         rows = [(0,)]
     elif sql == 'SELECT pg_is_in_recovery()':
         rows = [(False,)]
     elif sql.startswith('WITH replication_info AS ('):
         info = ('[{"application_name":"walreceiver","client_addr":"1.2.3.4",'
                 '"state":"streaming","sync_state":"async","sync_priority":0}]')
         rows = [('', True, '', '', '', '', False, info)]
     elif sql.startswith('SELECT name, setting'):
         rows = [('wal_segment_size', '2048', '8kB', 'integer', 'internal'),
                 ('search_path', 'public', None, 'string', 'user'),
                 ('port', '5433', None, 'integer', 'postmaster'),
                 ('listen_addresses', '*', None, 'string', 'postmaster'),
                 ('autovacuum', 'on', None, 'bool', 'sighup')]
     else:
         # default: one row of ten NULL columns
         rows = [(None,) * 10]
     self.results = rows
Example #5
0
 def _query(self, sql, *params):
     """We are always using the same cursor, therefore this method is not thread-safe!!!
     You can call it from different threads only if you are holding explicit `AsyncExecutor` lock,
     because the main thread is always holding this lock when running HA cycle."""
     cursor = None
     try:
         cursor = self._connection.cursor()
         cursor.execute(sql, params)
         return cursor
     except psycopg2.Error as e:
         if cursor and cursor.connection.closed == 0:
             # When connected via unix socket, psycopg2 can't recognize 'connection lost'
             # and leaves `_cursor_holder.connection.closed == 0`, but psycopg2.OperationalError
             # is still raised (which is correct). It doesn't make sense to continue with the
             # existing connection and we will close it, to avoid its reuse by the `cursor` method.
             if isinstance(e, psycopg2.OperationalError):
                 self._connection.close()
             else:
                 raise e
         # NOTE(review): when the connection is already closed and state is not
         # 'restarting', the psycopg2 error is swallowed and None is returned —
         # confirm callers expect that.
         if self.state == 'restarting':
             raise RetryFailedError('cluster is being restarted')
     except PostgresConnectionException as e:
         # Best-effort user hint on stdout, then re-raise with a generic message.
         print('Connection problem')
         print('Hint: check if there is already a postgresql server running')
         # sys.exit('Connection problems')
         raise PostgresConnectionException('connection problems')
Example #6
0
    def get_postgresql_status(self, retry=False):
        """Collect a status snapshot of the local PostgreSQL instance.

        Runs a single statement that fetches postmaster start time,
        timeline/LSN numbers and replication info, then shapes the row into
        the REST-API response dict.  On any query/connection failure returns
        a minimal ``{'state': ..., 'role': ...}`` dict instead of raising.

        :param retry: passed through to ``self.query`` to allow retrying.
        """
        try:
            cluster = self.server.patroni.dcs.cluster

            # Only a started (or starting/restarting) postmaster can answer queries.
            if self.server.patroni.postgresql.state not in ('running', 'restarting', 'starting'):
                raise RetryFailedError('')
            # {0}/{1} placeholders are filled with wal_name/lsn_name below to
            # support both pre-10 (xlog/location) and 10+ (wal/lsn) naming.
            stmt = ("WITH replication_info AS ("
                    "SELECT usename, application_name, client_addr, state, sync_state, sync_priority"
                    " FROM pg_catalog.pg_stat_replication) SELECT"
                    " pg_catalog.to_char(pg_catalog.pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS.MS TZ'),"
                    " CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0"
                    " ELSE ('x' || pg_catalog.substr(pg_catalog.pg_{0}file_name("
                    "pg_catalog.pg_current_{0}_{1}()), 1, 8))::bit(32)::int END,"
                    " CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0"
                    " ELSE pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(), '0/0')::bigint END,"
                    " pg_catalog.pg_{0}_{1}_diff(COALESCE(pg_catalog.pg_last_{0}_receive_{1}(),"
                    " pg_catalog.pg_last_{0}_replay_{1}()), '0/0')::bigint,"
                    " pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), '0/0')::bigint,"
                    " pg_catalog.to_char(pg_catalog.pg_last_xact_replay_timestamp(), 'YYYY-MM-DD HH24:MI:SS.MS TZ'),"
                    " pg_catalog.pg_is_in_recovery() AND pg_catalog.pg_is_{0}_replay_paused(), "
                    "(SELECT pg_catalog.array_to_json(pg_catalog.array_agg("
                    "pg_catalog.row_to_json(ri))) FROM replication_info ri)")

            row = self.query(stmt.format(self.server.patroni.postgresql.wal_name,
                                         self.server.patroni.postgresql.lsn_name), retry=retry)[0]

            # row[1] is 0 while in recovery (replica), else the current timeline.
            result = {
                'state': self.server.patroni.postgresql.state,
                'postmaster_start_time': row[0],
                'role': 'replica' if row[1] == 0 else 'master',
                'server_version': self.server.patroni.postgresql.server_version,
                'cluster_unlocked': bool(not cluster or cluster.is_unlocked()),
                'xlog': ({
                    'received_location': row[3],
                    'replayed_location': row[4],
                    'replayed_timestamp': row[5],
                    'paused': row[6]} if row[1] == 0 else {
                    'location': row[2]
                })
            }

            if result['role'] == 'replica' and self.server.patroni.ha.is_standby_cluster():
                result['role'] = self.server.patroni.postgresql.role

            if row[1] > 0:
                result['timeline'] = row[1]
            else:
                # Replica: derive the timeline from the leader (when known).
                leader_timeline = None if not cluster or cluster.is_unlocked() else cluster.leader.timeline
                result['timeline'] = self.server.patroni.postgresql.replica_cached_timeline(leader_timeline)

            if row[7]:
                result['replication'] = row[7]

            return result
        except (psycopg2.Error, RetryFailedError, PostgresConnectionException):
            state = self.server.patroni.postgresql.state
            if state == 'running':
                # Query failed although postmaster claims to be running.
                logger.exception('get_postgresql_status')
                state = 'unknown'
            return {'state': state, 'role': self.server.patroni.postgresql.role}
Example #7
0
    def get_postgresql_status(self, retry=False):
        """Collect a status snapshot of the local PostgreSQL instance.

        Variant that queries with a multi-line SQL statement; on any
        query/connection failure returns a minimal
        ``{'state': ..., 'role': ...}`` dict instead of raising.

        :param retry: passed through to ``self.query`` to allow retrying.
        """
        try:
            # Only a started (or starting/restarting) postmaster can answer queries.
            if self.server.patroni.postgresql.state not in ('running',
                                                            'restarting',
                                                            'starting'):
                raise RetryFailedError('')
            # {0}/{1} placeholders are filled with wal_name/lsn_name to support
            # both pre-10 (xlog/location) and 10+ (wal/lsn) naming.
            row = self.query("""WITH replication_info AS (
                                    SELECT usename, application_name, client_addr, state, sync_state, sync_priority
                                      FROM pg_stat_replication
                                )
                                SELECT to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS.MS TZ'),
                                       pg_is_in_recovery(),
                                       CASE WHEN pg_is_in_recovery()
                                            THEN 0
                                            ELSE pg_{0}_{1}_diff(pg_current_{0}_{1}(), '0/0')::bigint
                                       END,
                                       pg_{0}_{1}_diff(COALESCE(pg_last_{0}_receive_{1}(),
                                                                      pg_last_{0}_replay_{1}()), '0/0')::bigint,
                                       pg_{0}_{1}_diff(pg_last_{0}_replay_{1}(), '0/0')::bigint,
                                       to_char(pg_last_xact_replay_timestamp(), 'YYYY-MM-DD HH24:MI:SS.MS TZ'),
                                       pg_is_in_recovery() AND pg_is_{0}_replay_paused(),
                                       (SELECT array_to_json(array_agg(row_to_json(ri)))
                                          FROM replication_info ri)""".format(
                self.server.patroni.postgresql.wal_name,
                self.server.patroni.postgresql.lsn_name),
                             retry=retry)[0]

            # row[1] is pg_is_in_recovery(): truthy means replica.
            result = {
                'state':
                self.server.patroni.postgresql.state,
                'postmaster_start_time':
                row[0],
                'role':
                'replica' if row[1] else 'master',
                'server_version':
                self.server.patroni.postgresql.server_version,
                'xlog': ({
                    'received_location': row[3],
                    'replayed_location': row[4],
                    'replayed_timestamp': row[5],
                    'paused': row[6]
                } if row[1] else {
                    'location': row[2]
                })
            }

            if row[7]:
                result['replication'] = row[7]

            return result
        except (psycopg2.Error, RetryFailedError, PostgresConnectionException):
            state = self.server.patroni.postgresql.state
            if state == 'running':
                # Query failed although postmaster claims to be running.
                logger.exception('get_postgresql_status')
                state = 'unknown'
            return {
                'state': state,
                'role': self.server.patroni.postgresql.role
            }
Example #8
0
 def execute(self, sql, *params):
     """Mock cursor execute: raise for error-probe statements, otherwise
     stage canned result rows on ``self.results``."""
     if sql.startswith('blabla'):
         raise psycopg.ProgrammingError()
     if sql == 'CHECKPOINT' or sql.startswith('SELECT pg_catalog.pg_create_'):
         raise psycopg.OperationalError()
     if sql.startswith('RetryFailedError'):
         raise RetryFailedError('retry')
     # NOTE: the more specific 'SELECT slot_name...' prefixes must be tested
     # before the generic 'SELECT slot_name' fallback.
     if sql.startswith('SELECT slot_name, catalog_xmin'):
         rows = [('postgresql0', 100), ('ls', 100)]
     elif sql.startswith('SELECT slot_name, slot_type, datname, plugin, catalog_xmin'):
         rows = [('ls', 'logical', 'a', 'b', 100, 500, b'123456')]
     elif sql.startswith('SELECT slot_name'):
         rows = [('blabla', 'physical'), ('foobar', 'physical'),
                 ('ls', 'logical', 'a', 'b', 5, 100, 500)]
     elif sql.startswith('SELECT CASE WHEN pg_catalog.pg_is_in_recovery()'):
         rows = [(1, 2, 1, 0, False, 1, 1, None, None,
                  [{"slot_name": "ls", "confirmed_flush_lsn": 12345}])]
     elif sql.startswith('SELECT pg_catalog.pg_is_in_recovery()'):
         rows = [(False, 2)]
     elif sql.startswith('SELECT pg_catalog.pg_postmaster_start_time'):
         info = ('[{"application_name":"walreceiver","client_addr":"1.2.3.4",'
                 '"state":"streaming","sync_state":"async","sync_priority":0}]')
         now = datetime.datetime.now(tzutc)
         rows = [(now, 0, '', 0, '', False, now, info)]
     elif sql.startswith('SELECT name, setting'):
         rows = [('wal_segment_size', '2048', '8kB', 'integer', 'internal'),
                 ('wal_block_size', '8192', None, 'integer', 'internal'),
                 ('shared_buffers', '16384', '8kB', 'integer', 'postmaster'),
                 ('wal_buffers', '-1', '8kB', 'integer', 'postmaster'),
                 ('search_path', 'public', None, 'string', 'user'),
                 ('port', '5433', None, 'integer', 'postmaster'),
                 ('listen_addresses', '*', None, 'string', 'postmaster'),
                 ('autovacuum', 'on', None, 'bool', 'sighup'),
                 ('unix_socket_directories', '/tmp', None, 'string', 'postmaster')]
     elif sql.startswith('IDENTIFY_SYSTEM'):
         rows = [('1', 3, '0/402EEC0', '')]
     elif sql.startswith('TIMELINE_HISTORY '):
         history = (b'x\t0/40159C0\tno recovery target specified\n\n'
                    b'1\t0/40159C0\tno recovery target specified\n\n'
                    b'2\t0/402DD98\tno recovery target specified\n\n'
                    b'3\t0/403DD98\tno recovery target specified\n')
         rows = [('', history)]
     else:
         # default: one row of ten NULL columns
         rows = [(None,) * 10]
     self.results = rows
Example #9
0
 def _query(self, sql, *params):
     cursor = None
     try:
         cursor = self._cursor()
         cursor.execute(sql, params)
         return cursor
     except psycopg2.Error as e:
         if cursor and cursor.connection.closed == 0:
             raise e
         if self.state == 'restarting':
             raise RetryFailedError('cluster is being restarted')
         raise PostgresConnectionException('connection problems')
Example #10
0
    def get_postgresql_status(self, retry=False):
        """Collect a status snapshot of the local PostgreSQL instance.

        Variant that builds the statement from ``POSTMASTER_START_TIME`` /
        ``TL_LSN`` fragments and reads wal senders directly from
        ``pg_stat_get_wal_senders()``.  On any query/connection failure a
        minimal ``{'state': ..., 'role': ...}`` dict is returned instead.

        :param retry: passed through to ``self.query`` to allow retrying.
        """
        postgresql = self.server.patroni.postgresql
        try:
            cluster = self.server.patroni.dcs.cluster

            # Only a started (or starting/restarting) postmaster can answer queries.
            if postgresql.state not in ('running', 'restarting', 'starting'):
                raise RetryFailedError('')
            stmt = ("SELECT " + postgresql.POSTMASTER_START_TIME + ", " + postgresql.TL_LSN + ","
                    " pg_catalog.to_char(pg_catalog.pg_last_xact_replay_timestamp(), 'YYYY-MM-DD HH24:MI:SS.MS TZ'),"
                    " pg_catalog.array_to_json(pg_catalog.array_agg(pg_catalog.row_to_json(ri))) "
                    "FROM (SELECT (SELECT rolname FROM pg_authid WHERE oid = usesysid) AS usename,"
                    " application_name, client_addr, w.state, sync_state, sync_priority"
                    " FROM pg_catalog.pg_stat_get_wal_senders() w, pg_catalog.pg_stat_get_activity(pid)"
                    " LEFT JOIN pg_replication_slots AS rs ON rs.active_pid = pid"
                    " WHERE slot_type IS NULL OR slot_type = 'physical') AS ri")

            row = self.query(stmt.format(postgresql.wal_name, postgresql.lsn_name), retry=retry)[0]

            # row[1] is 0 while in recovery (replica), else the current timeline.
            result = {
                'state': postgresql.state,
                'postmaster_start_time': row[0],
                'role': 'replica' if row[1] == 0 else 'master',
                'server_version': postgresql.server_version,
                'cluster_unlocked': bool(not cluster or cluster.is_unlocked()),
                'xlog': ({
                    'received_location': row[4] or row[3],
                    'replayed_location': row[3],
                    'replayed_timestamp': row[6],
                    'paused': row[5]} if row[1] == 0 else {
                    'location': row[2]
                })
            }

            if result['role'] == 'replica' and self.server.patroni.ha.is_standby_cluster():
                result['role'] = postgresql.role

            if row[1] > 0:
                result['timeline'] = row[1]
            else:
                # Replica: derive the timeline from the leader (when known).
                leader_timeline = None if not cluster or cluster.is_unlocked() else cluster.leader.timeline
                result['timeline'] = postgresql.replica_cached_timeline(leader_timeline)

            if row[7]:
                result['replication'] = row[7]

            return result
        except (psycopg2.Error, RetryFailedError, PostgresConnectionException):
            state = postgresql.state
            if state == 'running':
                # Query failed although postmaster claims to be running.
                logger.exception('get_postgresql_status')
                state = 'unknown'
            return {'state': state, 'role': postgresql.role}
Example #11
0
 def execute(self, sql, *params):
     """Mock cursor execute: raise for error-probe statements, otherwise
     stage canned result rows on ``self.results``."""
     if sql == 'CHECKPOINT' or sql.startswith('blabla'):
         raise psycopg2.OperationalError()
     if sql.startswith('RetryFailedError'):
         raise RetryFailedError('retry')
     if sql.startswith('SELECT slot_name'):
         rows = [('blabla',), ('foobar',)]
     elif sql.startswith('SELECT pg_xlog_location_diff'):
         rows = [(0,)]
     elif sql == 'SELECT pg_is_in_recovery()':
         rows = [(False,)]
     elif sql.startswith('SELECT to_char(pg_postmaster_start_time'):
         rows = [('', True, '', '', '', '', False)]
     elif sql.startswith('SELECT name, setting'):
         rows = [('wal_segment_size', '2048', '8kB', 'integer', 'internal'),
                 ('search_path', 'public', None, 'string', 'user'),
                 ('port', '5433', None, 'integer', 'postmaster'),
                 ('listen_addresses', '*', None, 'string', 'postmaster'),
                 ('autovacuum', 'on', None, 'bool', 'sighup')]
     else:
         # default: one row of ten NULL columns
         rows = [(None,) * 10]
     self.results = rows
Example #12
0
class TestPostgresql(BaseTestPostgresql):
    @patch('subprocess.call', Mock(return_value=0))
    @patch('os.rename', Mock())
    @patch('patroni.postgresql.CallbackExecutor', Mock())
    @patch.object(Postgresql, 'get_major_version', Mock(return_value=130000))
    @patch.object(Postgresql, 'is_running', Mock(return_value=True))
    def setUp(self):
        """Build the mocked Postgresql fixture and write its postgresql.conf."""
        super(TestPostgresql, self).setUp()
        self.p.config.write_postgresql_conf()

    @patch('subprocess.Popen')
    @patch.object(Postgresql, 'wait_for_startup')
    @patch.object(Postgresql, 'wait_for_port_open')
    @patch.object(Postgresql, 'is_running')
    @patch.object(Postgresql, 'controldata', Mock())
    def test_start(self, mock_is_running, mock_wait_for_port_open,
                   mock_wait_for_startup, mock_popen):
        """Exercise Postgresql.start(): success, failed/slow startup, port-open
        failure, cancelled task, cancellation and config-generation errors."""
        mock_is_running.return_value = MockPostmaster()
        mock_wait_for_port_open.return_value = True
        mock_wait_for_startup.return_value = False
        mock_popen.return_value.stdout.readline.return_value = '123'
        self.assertTrue(self.p.start())
        mock_is_running.return_value = None

        mock_postmaster = MockPostmaster()
        with patch.object(PostmasterProcess,
                          'start',
                          return_value=mock_postmaster):
            pg_conf = os.path.join(self.p.data_dir, 'postgresql.conf')
            open(pg_conf, 'w').close()
            self.assertFalse(self.p.start(task=CriticalTask()))

            # start() must have rewritten postgresql.conf with custom settings
            with open(pg_conf) as f:
                lines = f.readlines()
                self.assertTrue("f.oo = 'bar'\n" in lines)

            mock_wait_for_startup.return_value = None
            self.assertFalse(self.p.start(10))
            self.assertIsNone(self.p.start())

            mock_wait_for_port_open.return_value = False
            self.assertFalse(self.p.start())
            task = CriticalTask()
            task.cancel()
            self.assertFalse(self.p.start(task=task))

        self.p.cancellable.cancel()
        self.assertFalse(self.p.start())
        with patch(
                'patroni.postgresql.config.ConfigHandler.effective_configuration',
                PropertyMock(side_effect=Exception)):
            self.assertIsNone(self.p.start())

    @patch.object(Postgresql, 'pg_isready')
    @patch('patroni.postgresql.polling_loop', Mock(return_value=range(1)))
    def test_wait_for_port_open(self, mock_pg_isready):
        """Cover wait_for_port_open(): dead postmaster, timeout, unexpected
        pg_isready output, and cancellation."""
        mock_pg_isready.return_value = STATE_NO_RESPONSE
        mock_postmaster = MockPostmaster(is_running=False)

        # No pid file and postmaster death
        self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))

        mock_postmaster.is_running.return_value = True

        # timeout
        self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))

        # pg_isready failure
        mock_pg_isready.return_value = 'garbage'
        self.assertTrue(self.p.wait_for_port_open(mock_postmaster, 1))

        # cancelled
        self.p.cancellable.cancel()
        self.assertFalse(self.p.wait_for_port_open(mock_postmaster, 1))

    @patch('time.sleep', Mock())
    @patch.object(Postgresql, 'is_running')
    @patch.object(Postgresql, '_wait_for_connection_close', Mock())
    def test_stop(self, mock_is_running):
        """Cover Postgresql.stop(): not running, clean stop, fast-shutdown
        timeout escalation, immediate shutdown, and signal failures."""
        # Postmaster is not running
        mock_callback = Mock()
        mock_is_running.return_value = None
        self.assertTrue(self.p.stop(on_safepoint=mock_callback))
        mock_callback.assert_called()

        # Is running, stopped successfully
        mock_is_running.return_value = mock_postmaster = MockPostmaster()
        mock_callback.reset_mock()
        self.assertTrue(self.p.stop(on_safepoint=mock_callback))
        mock_callback.assert_called()
        mock_postmaster.signal_stop.assert_called()

        # Timed out waiting for fast shutdown triggers immediate shutdown
        mock_postmaster.wait.side_effect = [
            psutil.TimeoutExpired(30),
            psutil.TimeoutExpired(30),
            Mock()
        ]
        mock_callback.reset_mock()
        self.assertTrue(
            self.p.stop(on_safepoint=mock_callback, stop_timeout=30))
        mock_callback.assert_called()
        mock_postmaster.signal_stop.assert_called()

        # Immediate shutdown succeeded
        mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()]
        self.assertTrue(
            self.p.stop(on_safepoint=mock_callback, stop_timeout=30))

        # Stop signal failed
        mock_postmaster.signal_stop.return_value = False
        self.assertFalse(self.p.stop())

        # Stop signal failed to find process
        mock_postmaster.signal_stop.return_value = True
        mock_callback.reset_mock()
        self.assertTrue(self.p.stop(on_safepoint=mock_callback))
        mock_callback.assert_called()

        # Fast shutdown is timed out but when immediate postmaster is already gone
        mock_postmaster.wait.side_effect = [psutil.TimeoutExpired(30), Mock()]
        mock_postmaster.signal_stop.side_effect = [None, True]
        self.assertTrue(
            self.p.stop(on_safepoint=mock_callback, stop_timeout=30))

    def test_restart(self):
        """A failed start() during restart must set the 'restart failed' state."""
        self.p.start = Mock(return_value=False)
        self.assertFalse(self.p.restart())
        self.assertEqual(self.p.state, 'restart failed (restarting)')

    @patch('os.chmod', Mock())
    @patch.object(builtins, 'open', MagicMock())
    def test_write_pgpass(self):
        """write_pgpass() must handle records both with and without a password."""
        self.p.config.write_pgpass({
            'host': 'localhost',
            'port': '5432',
            'user': '******'
        })
        self.p.config.write_pgpass({
            'host': 'localhost',
            'port': '5432',
            'user': '******',
            'password': '******'
        })

    def test_checkpoint(self):
        """checkpoint() result strings for recovery mode, success and failure."""
        with patch.object(MockCursor, 'fetchone', Mock(return_value=(True, ))):
            self.assertEqual(self.p.checkpoint({'user': '******'}),
                             'is_in_recovery=true')
        with patch.object(MockCursor, 'execute', Mock(return_value=None)):
            self.assertIsNone(self.p.checkpoint())
        self.assertEqual(self.p.checkpoint(timeout=10),
                         'not accessible or not healty')

    @patch('patroni.postgresql.config.mtime', mock_mtime)
    @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings')
    def test_check_recovery_conf(self, mock_get_pg_settings):
        """check_recovery_conf() must report (needs_restart, needs_reload)
        correctly as primary_conninfo and recovery_min_apply_delay change."""
        mock_get_pg_settings.return_value = {
            'primary_conninfo': [
                'primary_conninfo', 'foo=', None, 'string', 'postmaster',
                self.p.config._auto_conf
            ],
            'recovery_min_apply_delay': [
                'recovery_min_apply_delay', '0', 'ms', 'integer', 'sighup',
                'foo'
            ]
        }
        self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
        self.p.config.write_recovery_conf({'standby_mode': 'on'})
        self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
        mock_get_pg_settings.return_value['primary_conninfo'][1] = ''
        mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '1'
        self.assertEqual(self.p.config.check_recovery_conf(None),
                         (False, False))
        mock_get_pg_settings.return_value['recovery_min_apply_delay'][
            5] = self.p.config._auto_conf
        self.assertEqual(self.p.config.check_recovery_conf(None),
                         (True, False))
        mock_get_pg_settings.return_value['recovery_min_apply_delay'][1] = '0'
        self.assertEqual(self.p.config.check_recovery_conf(None),
                         (False, False))
        conninfo = {'host': '1', 'password': '******'}
        with patch(
                'patroni.postgresql.config.ConfigHandler.primary_conninfo_params',
                Mock(return_value=conninfo)):
            mock_get_pg_settings.return_value['recovery_min_apply_delay'][
                1] = '1'
            self.assertEqual(self.p.config.check_recovery_conf(None),
                             (True, True))
            mock_get_pg_settings.return_value['primary_conninfo'][1] = 'host=1 passfile='\
                + re.sub(r'([\'\\ ])', r'\\\1', self.p.config._pgpass)
            mock_get_pg_settings.return_value['recovery_min_apply_delay'][
                1] = '0'
            self.assertEqual(self.p.config.check_recovery_conf(None),
                             (True, True))
            self.p.config.write_recovery_conf({
                'standby_mode':
                'on',
                'primary_conninfo':
                conninfo.copy()
            })
            self.p.config.write_postgresql_conf()
            self.assertEqual(self.p.config.check_recovery_conf(None),
                             (False, False))
            with patch.object(Postgresql, 'primary_conninfo',
                              Mock(return_value='host=1')):
                mock_get_pg_settings.return_value['primary_slot_name'] = [
                    'primary_slot_name', '', '', 'string', 'postmaster',
                    self.p.config._postgresql_conf
                ]
                self.assertEqual(self.p.config.check_recovery_conf(None),
                                 (True, True))

    @patch.object(Postgresql, 'major_version',
                  PropertyMock(return_value=120000))
    @patch.object(Postgresql, 'is_running', MockPostmaster)
    @patch.object(MockPostmaster,
                  'create_time',
                  Mock(return_value=1234567),
                  create=True)
    @patch('patroni.postgresql.config.ConfigHandler._get_pg_settings')
    def test__read_recovery_params(self, mock_get_pg_settings):
        """Recovery-parameter reading on v12+: repeated stable checks, and the
        fallback when reading pg_settings raises."""
        mock_get_pg_settings.return_value = {
            'primary_conninfo': [
                'primary_conninfo', '', None, 'string', 'postmaster',
                self.p.config._postgresql_conf
            ]
        }
        self.p.config.write_recovery_conf({
            'standby_mode': 'on',
            'primary_conninfo': {
                'password': '******'
            }
        })
        self.p.config.write_postgresql_conf()
        self.assertEqual(self.p.config.check_recovery_conf(None),
                         (False, False))
        self.assertEqual(self.p.config.check_recovery_conf(None),
                         (False, False))
        mock_get_pg_settings.side_effect = Exception
        with patch('patroni.postgresql.config.mtime', mock_mtime):
            self.assertEqual(self.p.config.check_recovery_conf(None),
                             (True, True))

    @patch.object(Postgresql, 'major_version',
                  PropertyMock(return_value=100000))
    @patch.object(Postgresql, 'primary_conninfo', Mock(return_value='host=1'))
    def test__read_recovery_params_pre_v12(self):
        """Recovery-parameter reading on pre-v12 (recovery.conf based) servers."""
        self.p.config.write_recovery_conf({
            'standby_mode': 'off',
            'primary_conninfo': {
                'password': '******'
            }
        })
        self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
        self.assertEqual(self.p.config.check_recovery_conf(None), (True, True))
        self.p.config.write_recovery_conf({'restore_command': '\n'})
        with patch('patroni.postgresql.config.mtime', mock_mtime):
            self.assertEqual(self.p.config.check_recovery_conf(None),
                             (True, True))

    def test_write_postgresql_and_sanitize_auto_conf(self):
        """write_postgresql_conf() must survive IOErrors while sanitizing
        postgresql.auto.conf."""
        read_data = 'primary_conninfo = foo\nfoo = bar\n'
        with open(os.path.join(self.p.data_dir, 'postgresql.auto.conf'),
                  'w') as f:
            f.write(read_data)

        mock_read_auto = mock_open(read_data=read_data)
        mock_read_auto.return_value.__iter__ = lambda o: iter(o.readline, '')
        with patch.object(builtins, 'open', Mock(side_effect=[mock_open()(), mock_read_auto(), IOError])),\
                patch('os.chmod', Mock()):
            self.p.config.write_postgresql_conf()

        with patch.object(builtins, 'open',
                          Mock(side_effect=[mock_open()(), IOError])), patch(
                              'os.chmod', Mock()):
            self.p.config.write_postgresql_conf()
        self.p.config.write_recovery_conf({'foo': 'bar'})
        self.p.config.write_postgresql_conf()

    @patch.object(Postgresql, 'is_running', Mock(return_value=False))
    @patch.object(Postgresql, 'start', Mock())
    def test_follow(self):
        """follow() a RemoteMember with restore_command and a primary slot."""
        self.p.call_nowait('on_start')
        m = RemoteMember(
            '1', {
                'restore_command': '2',
                'primary_slot_name': 'foo',
                'conn_kwargs': {
                    'host': 'bar'
                }
            })
        self.p.follow(m)

    @patch.object(Postgresql, 'is_running', Mock(return_value=True))
    def test_sync_replication_slots(self):
        """sync_replication_slots() must survive query failures, handle the
        replica role, and log one error per invalid/conflicting slot name --
        including member names ('test-3', 'test.3') that collide once mangled
        into slot names.
        """
        self.p.start()
        config = ClusterConfig(
            1, {
                'slots': {
                    'test_3': {
                        'database': 'a',
                        'plugin': 'b'
                    },
                    'A': 0,
                    'ls': 0,
                    'b': {
                        'type': 'logical',
                        'plugin': '1'
                    }
                },
                'ignore_slots': [{
                    'name': 'blabla'
                }]
            }, 1)
        cluster = Cluster(True, config, self.leader, 0,
                          [self.me, self.other, self.leadermem], None, None,
                          None)
        # A failing query must not propagate out of the slot sync.
        with mock.patch('patroni.postgresql.Postgresql._query',
                        Mock(side_effect=psycopg2.OperationalError)):
            self.p.slots_handler.sync_replication_slots(cluster)
        self.p.slots_handler.sync_replication_slots(cluster)
        # Same sync from the replica point of view.
        with mock.patch('patroni.postgresql.Postgresql.role',
                        new_callable=PropertyMock(return_value='replica')):
            self.p.slots_handler.sync_replication_slots(cluster)
        with patch.object(SlotsHandler, 'drop_replication_slot', Mock(return_value=True)),\
                patch('patroni.dcs.logger.error', new_callable=Mock()) as errorlog_mock:
            # Two members whose names both mangle to the same slot name.
            alias1 = Member(
                0, 'test-3', 28, {
                    'conn_url':
                    'postgres://*****:*****@127.0.0.1:5436/postgres'
                })
            alias2 = Member(
                0, 'test.3', 28, {
                    'conn_url':
                    'postgres://*****:*****@127.0.0.1:5436/postgres'
                })
            cluster.members.extend([alias1, alias2])
            self.p.slots_handler.sync_replication_slots(cluster)
            self.assertEqual(errorlog_mock.call_count, 5)
            # The first logged error must mention both colliding member names.
            ca = errorlog_mock.call_args_list[0][0][1]
            self.assertTrue("test-3" in ca, "non matching {0}".format(ca))
            self.assertTrue("test.3" in ca, "non matching {0}".format(ca))

    @patch.object(MockCursor, 'execute',
                  Mock(side_effect=psycopg2.OperationalError))
    def test__query(self):
        """_query() wraps connection errors; while 'restarting' it raises a
        retry error instead."""
        with self.assertRaises(PostgresConnectionException):
            self.p._query('blabla')
        self.p._state = 'restarting'
        with self.assertRaises(RetryFailedError):
            self.p._query('blabla')

    def test_query(self):
        """query() works for a plain statement, surfaces retry exhaustion as a
        connection exception, and propagates programming errors unchanged."""
        self.p.query('select 1')
        with self.assertRaises(PostgresConnectionException):
            self.p.query('RetryFailedError')
        with self.assertRaises(psycopg2.ProgrammingError):
            self.p.query('blabla')

    @patch.object(Postgresql, 'pg_isready', Mock(return_value=STATE_REJECT))
    def test_is_leader(self):
        """is_leader() is True for a primary; a failing cluster-info query
        surfaces as PostgresConnectionException."""
        self.assertTrue(self.p.is_leader())
        self.p.reset_cluster_info_state()
        failing_query = Mock(side_effect=RetryFailedError(''))
        with patch.object(Postgresql, '_query', failing_query):
            with self.assertRaises(PostgresConnectionException):
                self.p.is_leader()

    def test_latest_checkpoint_location(self):
        """An unparsable 'Latest checkpoint location' in controldata yields None."""
        controldata = {
            'Database cluster state': 'shut down',
            'Latest checkpoint location': 'X/678',
        }
        with patch.object(Postgresql, 'controldata',
                          Mock(return_value=controldata)):
            self.assertIsNone(self.p.latest_checkpoint_location())

    def test_reload(self):
        """reload() reports success under the default mocked environment."""
        reloaded = self.p.reload()
        self.assertTrue(reloaded)

    @patch.object(Postgresql, 'is_running')
    def test_is_healthy(self, mock_is_running):
        """is_healthy() simply mirrors whether postmaster is running."""
        for running, check in ((True, self.assertTrue),
                               (False, self.assertFalse)):
            mock_is_running.return_value = running
            check(self.p.is_healthy())

    @patch('psutil.Popen')
    def test_promote(self, mock_popen):
        """promote() returns True on success and False when the pre_promote
        script is cancelled, cannot be spawned, or the critical task was
        cancelled beforehand.
        """
        mock_popen.return_value.wait.return_value = 0
        task = CriticalTask()
        self.assertTrue(self.p.promote(0, task))

        self.p.set_role('replica')
        self.p.config._config['pre_promote'] = 'test'
        # Cancellation is detected while the pre_promote script runs.
        with patch(
                'patroni.postgresql.cancellable.CancellableSubprocess.is_cancelled',
                PropertyMock(return_value=1)):
            self.assertFalse(self.p.promote(0, task))

        # The pre_promote subprocess fails to start at all.
        mock_popen.side_effect = Exception
        self.assertFalse(self.p.promote(0, task))
        # The critical task is cancelled before promote can complete.
        task.reset()
        task.cancel()
        self.assertFalse(self.p.promote(0, task))

    def test_timeline_wal_position(self):
        """timeline_wal_position() returns the mocked (timeline, wal position,
        pg_control timeline) triple and must also be callable from a non-main
        thread.

        Fix: the worker thread is now joined, so it cannot leak into (and
        interfere with) subsequent tests -- consistent with how
        test_postmaster_start_time handles its thread.
        """
        self.assertEqual(self.p.timeline_wal_position(), (1, 2, 1))
        t = Thread(target=self.p.timeline_wal_position)
        t.start()
        t.join()

    @patch.object(PostmasterProcess, 'from_pidfile')
    def test_is_running(self, mock_frompidfile):
        """is_running() prefers the cached postmaster process, falls back to the
        pidfile, and keeps the internal _postmaster_proc cache in sync.
        """
        # Cached postmaster running
        mock_postmaster = self.p._postmaster_proc = MockPostmaster()
        self.assertEqual(self.p.is_running(), mock_postmaster)

        # Cached postmaster not running, no postmaster running
        mock_postmaster.is_running.return_value = False
        mock_frompidfile.return_value = None
        self.assertEqual(self.p.is_running(), None)
        self.assertEqual(self.p._postmaster_proc, None)

        # No cached postmaster, postmaster running
        mock_frompidfile.return_value = mock_postmaster2 = MockPostmaster()
        self.assertEqual(self.p.is_running(), mock_postmaster2)
        self.assertEqual(self.p._postmaster_proc, mock_postmaster2)

    @patch('shlex.split', Mock(side_effect=OSError))
    def test_call_nowait(self):
        """A broken callback command (shlex failure) is swallowed and the
        callback is also suppressed entirely during bootstrap."""
        self.p.set_role('replica')
        self.assertIsNone(self.p.call_nowait('on_start'))
        self.p.bootstrapping = True
        result = self.p.call_nowait('on_start')
        self.assertIsNone(result)

    def test_non_existing_callback(self):
        """call_nowait() with an unknown callback name returns a falsy value."""
        fired = self.p.call_nowait('foobar')
        self.assertFalse(fired)

    @patch.object(Postgresql, 'is_running',
                  Mock(return_value=MockPostmaster()))
    def test_is_leader_exception(self):
        """stop() must still succeed even if the pre-stop query raises."""
        self.p.start()
        self.p.query = Mock(
            side_effect=psycopg2.OperationalError("not supported"))
        stopped = self.p.stop()
        self.assertTrue(stopped)

    @patch('os.rename', Mock())
    @patch('os.path.isdir', Mock(return_value=True))
    @patch('os.unlink', Mock())
    @patch('os.symlink', Mock())
    @patch('patroni.postgresql.Postgresql.pg_wal_realpath',
           Mock(return_value={'pg_wal': '/mnt/pg_wal'}))
    @patch('patroni.postgresql.Postgresql.pg_tblspc_realpaths',
           Mock(return_value={'42': '/mnt/tablespaces/archive'}))
    def test_move_data_directory(self):
        """move_data_directory() handles a data dir with pg_wal and tablespace
        symlinks, and must swallow an OSError raised by os.rename."""
        self.p.move_data_directory()
        with patch('os.rename', Mock(side_effect=OSError)):
            self.p.move_data_directory()

    @patch('os.listdir', Mock(return_value=['recovery.conf']))
    @patch('os.path.exists', Mock(return_value=True))
    @patch.object(Postgresql, 'controldata', Mock())
    def test_get_postgres_role_from_data_directory(self):
        """A data directory containing recovery.conf is classified as replica."""
        role = self.p.get_postgres_role_from_data_directory()
        self.assertEqual(role, 'replica')

    def test_remove_data_directory(self):
        """remove_data_directory() must cope with symlinked pg_wal, symlinked
        tablespaces, a data dir that is a plain file, a dangling symlink, and a
        failing os.unlink.
        """
        def _symlink(src, dst):
            # Helper: create a symlink, skipped on Windows where it needs
            # admin rights.
            if os.name != 'nt':  # os.symlink under Windows needs admin rights skip it
                os.symlink(src, dst)

        os.makedirs(os.path.join(self.p.data_dir, 'foo'))
        _symlink('foo', os.path.join(self.p.data_dir, 'pg_wal'))
        os.makedirs(os.path.join(self.p.data_dir, 'foo_tsp'))
        pg_tblspc = os.path.join(self.p.data_dir, 'pg_tblspc')
        os.makedirs(pg_tblspc)
        _symlink('../foo_tsp', os.path.join(pg_tblspc, '12345'))
        self.p.remove_data_directory()
        # Data dir replaced by a plain file.
        open(self.p.data_dir, 'w').close()
        self.p.remove_data_directory()
        # Data dir replaced by a dangling symlink; unlink failure is swallowed.
        _symlink('unexisting', self.p.data_dir)
        with patch('os.unlink', Mock(side_effect=OSError)):
            self.p.remove_data_directory()
        self.p.remove_data_directory()

    @patch('patroni.postgresql.Postgresql._version_file_exists',
           Mock(return_value=True))
    def test_controldata(self):
        """controldata() parses pg_controldata output into a dict and returns an
        empty dict when the tool fails."""
        with patch('subprocess.check_output',
                   Mock(return_value=0, side_effect=pg_controldata_string)):
            data = self.p.controldata()
            # The canned pg_controldata fixture yields exactly 50 entries.
            self.assertEqual(len(data), 50)
            self.assertEqual(data['Database cluster state'],
                             'shut down in recovery')
            self.assertEqual(data['wal_log_hints setting'], 'on')
            self.assertEqual(int(data['Database block size']), 8192)

        # pg_controldata exiting non-zero yields an empty result.
        with patch('subprocess.check_output',
                   Mock(side_effect=subprocess.CalledProcessError(1, ''))):
            self.assertEqual(self.p.controldata(), {})

    @patch('patroni.postgresql.Postgresql._version_file_exists',
           Mock(return_value=True))
    @patch('subprocess.check_output',
           MagicMock(return_value=0, side_effect=pg_controldata_string))
    def test_sysid(self):
        """sysid is extracted from the canned pg_controldata output."""
        self.assertEqual(self.p.sysid, "6200971513092291716")

    @patch('os.path.isfile', Mock(return_value=True))
    @patch('shutil.copy', Mock(side_effect=IOError))
    def test_save_configuration_files(self):
        """Copy failures while saving config files are swallowed, not raised."""
        self.p.config.save_configuration_files()

    @patch('os.path.isfile', Mock(side_effect=[False, True]))
    @patch('shutil.copy', Mock(side_effect=IOError))
    def test_restore_configuration_files(self):
        """Missing backups and copy failures during restore are swallowed."""
        self.p.config.restore_configuration_files()

    def test_can_create_replica_without_replication_connection(self):
        """True only when at least one configured create_replica_method
        declares no_master support."""
        cfg = self.p.config._config
        cfg['create_replica_method'] = []
        self.assertFalse(
            self.p.can_create_replica_without_replication_connection())
        cfg['create_replica_method'] = ['wale', 'basebackup']
        cfg['wale'] = {'command': 'foo', 'no_master': 1}
        self.assertTrue(
            self.p.can_create_replica_without_replication_connection())

    def test_replica_method_can_work_without_replication_connection(self):
        """Only replica methods configured with a truthy 'no_master' qualify."""
        check = self.p.replica_method_can_work_without_replication_connection
        # basebackup always needs a connection; unknown methods never qualify.
        for method in ('basebackup', 'foobar'):
            self.assertFalse(check(method))
        self.p.config._config['foo'] = {'command': 'bar', 'no_master': 1}
        self.assertTrue(check('foo'))
        # Without the no_master flag the method needs a connection again.
        self.p.config._config['foo'] = {'command': 'bar'}
        self.assertFalse(check('foo'))

    @patch('time.sleep', Mock())
    @patch.object(Postgresql, 'is_running', Mock(return_value=True))
    @patch.object(MockCursor, 'fetchone')
    def test_reload_config(self, mock_fetchone):
        """reload_config() is driven through several successive configs:
        baseline, a failing settings query, changed GUCs, a changed listen
        address, and unix sockets -- none of them may raise.
        """
        mock_fetchone.return_value = (1, )
        parameters = self._PARAMETERS.copy()
        parameters.pop('f.oo')
        parameters['wal_buffers'] = '512'
        config = {
            'pg_hba': [''],
            'pg_ident': [''],
            'use_unix_socket': True,
            'authentication': {},
            'retry_timeout': 10,
            'listen': '*',
            'krbsrvname': 'postgres',
            'parameters': parameters
        }
        self.p.reload_config(config)
        # The settings query now fails; reload_config must still cope.
        mock_fetchone.side_effect = Exception
        parameters['b.ar'] = 'bar'
        self.p.reload_config(config)
        parameters['autovacuum'] = 'on'
        self.p.reload_config(config)
        # Changed listen port implies a restart-required change.
        parameters['autovacuum'] = 'off'
        parameters.pop('search_path')
        config['listen'] = '*:5433'
        self.p.reload_config(config)
        parameters['unix_socket_directories'] = '.'
        self.p.reload_config(config)
        self.p.config.resolve_connection_addresses()

    @patch.object(Postgresql, '_version_file_exists', Mock(return_value=True))
    def test_get_major_version(self):
        """PG_VERSION content '9.4' parses to 90400; a read failure yields 0."""
        with patch.object(builtins, 'open', mock_open(read_data='9.4')):
            self.assertEqual(self.p.get_major_version(), 90400)
        with patch.object(builtins, 'open', Mock(side_effect=Exception)):
            self.assertEqual(self.p.get_major_version(), 0)

    def test_postmaster_start_time(self):
        """postmaster_start_time() returns the first column of the cluster-info
        row, works from another thread, and returns None on query errors.
        """
        with patch.object(
                MockCursor, "fetchone",
                Mock(return_value=('foo', True, '', '', '', '', False))):
            self.assertEqual(self.p.postmaster_start_time(), 'foo')
            # Also exercise the call from a non-main thread.
            t = Thread(target=self.p.postmaster_start_time)
            t.start()
            t.join()

        with patch.object(MockCursor, "execute", side_effect=psycopg2.Error):
            self.assertIsNone(self.p.postmaster_start_time())

    def test_check_for_startup(self):
        """check_for_startup() maps pg_isready exit codes onto the state
        machine: 0 -> running, 1 (rejecting connections) -> still starting,
        2 -> start failed; an unexpected code such as 127 is treated as
        running.
        """
        cases = (
            # (pg_isready rc, initial state, still starting?, resulting state)
            (0, 'starting', False, 'running'),
            (1, 'starting', True, 'starting'),
            (2, 'starting', False, 'start failed'),
            (0, 'running', False, 'running'),
            (127, 'running', False, 'running'),
            (127, 'starting', False, 'running'),
        )
        for isready_rc, initial, still_starting, expected in cases:
            with patch('subprocess.call', return_value=isready_rc):
                self.p._state = initial
                if still_starting:
                    self.assertTrue(self.p.check_for_startup())
                else:
                    self.assertFalse(self.p.check_for_startup())
                self.assertEqual(self.p.state, expected)

    def test_wait_for_startup(self):
        """wait_for_startup() polls pg_isready (sleeping between rejections)
        until the instance accepts connections, fails, or the timeout/cancel
        kicks in. The shared ``state`` dict drives the mocked pg_isready.
        """
        state = {'sleeps': 0, 'num_rejects': 0, 'final_return': 0}
        self.__thread_ident = current_thread().ident

        def increment_sleeps(*args):
            # Count sleeps only on the test's own thread to stay deterministic.
            if current_thread().ident == self.__thread_ident:
                print("Sleep")
                state['sleeps'] += 1

        def isready_return(*args):
            # Reject 'num_rejects' times, then report 'final_return'.
            ret = 1 if state['sleeps'] < state['num_rejects'] else state[
                'final_return']
            print("Isready {0} {1}".format(ret, state))
            return ret

        def time_in_state(*args):
            # Fake elapsed time: one "second" per sleep taken.
            return state['sleeps']

        with patch('subprocess.call', side_effect=isready_return):
            with patch('time.sleep', side_effect=increment_sleeps):
                self.p.time_in_state = Mock(side_effect=time_in_state)

                # Not starting at all -> immediate success, no sleeps.
                self.p._state = 'stopped'
                self.assertTrue(self.p.wait_for_startup())
                self.assertEqual(state['sleeps'], 0)

                # Five rejections before success -> five sleeps.
                self.p._state = 'starting'
                state['num_rejects'] = 5
                self.assertTrue(self.p.wait_for_startup())
                self.assertEqual(state['sleeps'], 5)

                # pg_isready eventually reports failure.
                self.p._state = 'starting'
                state['sleeps'] = 0
                state['final_return'] = 2
                self.assertFalse(self.p.wait_for_startup())

                # Timeout expires before the rejections are exhausted.
                self.p._state = 'starting'
                state['sleeps'] = 0
                state['final_return'] = 0
                self.assertFalse(self.p.wait_for_startup(timeout=2))
                self.assertEqual(state['sleeps'], 3)

        # Cancellation while the startup state never changes -> None.
        with patch.object(Postgresql, 'check_startup_state_changed',
                          Mock(return_value=False)):
            self.p.cancellable.cancel()
            self.p._state = 'starting'
            self.assertIsNone(self.p.wait_for_startup())

    def test_pick_sync_standby(self):
        """pick_synchronous_standby() chooses members from the mocked
        pg_stat_replication rows: a streaming 'sync' member is both candidate
        and current-sync; 'potential'/'async' members are candidates only;
        unknown application_names are ignored.
        """
        cluster = Cluster(True, None, self.leader, 0,
                          [self.me, self.other, self.leadermem], None,
                          SyncState(0, self.me.name,
                                    self.leadermem.name), None)
        # First query returns synchronous_commit; second the replication rows.
        mock_cursor = Mock()
        mock_cursor.fetchone.return_value = ('remote_apply', )

        # Leader member is already sync -> picked as both candidate and sync.
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [(self.leadermem.name, 'sync', 1),
                               (self.me.name, 'async', 2),
                               (self.other.name, 'async', 2)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.leadermem.name], [self.leadermem.name]))

        # 'potential' state -> candidate only, not yet sync.
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [(self.leadermem.name, 'potential', 1),
                               (self.me.name, 'async', 2),
                               (self.other.name, 'async', 2)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.leadermem.name], []))

        # Only async members -> best-priority async becomes the candidate.
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [(self.me.name, 'async', 1),
                               (self.other.name, 'async', 2)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.me.name], []))

        # A sync row for a non-member ('missing') must be ignored.
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [('missing', 'sync', 1),
                               (self.me.name, 'async', 2),
                               (self.other.name, 'async', 3)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.me.name], []))

        # No replication rows on an old (9.4) server -> nothing picked.
        with patch.object(Postgresql, "query", side_effect=[mock_cursor, []]):
            self.p._major_version = 90400
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([], []))

    def test_set_sync_standby(self):
        """set_synchronous_standby() writes synchronous_standby_names into
        postgresql.conf and triggers a reload only when the value actually
        changed; an empty list removes the parameter.

        Fix: the final check uses assertIsNone instead of the non-idiomatic
        assertEqual(value, None).
        """
        def value_in_conf():
            # Return the synchronous_standby_names line currently present in
            # postgresql.conf, or None when the parameter is absent.
            with open(os.path.join(self.p.data_dir, 'postgresql.conf')) as f:
                for line in f:
                    if line.startswith('synchronous_standby_names'):
                        return line.strip()

        mock_reload = self.p.reload = Mock()
        self.p.config.set_synchronous_standby(['n1'])
        self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")
        mock_reload.assert_called()

        # Setting the same value again must be a no-op: no reload.
        mock_reload.reset_mock()
        self.p.config.set_synchronous_standby(['n1'])
        mock_reload.assert_not_called()
        self.assertEqual(value_in_conf(), "synchronous_standby_names = 'n1'")

        self.p.config.set_synchronous_standby(['n1', 'n2'])
        mock_reload.assert_called()
        self.assertEqual(value_in_conf(),
                         "synchronous_standby_names = '2 (n1,n2)'")

        # Clearing the standby list removes the parameter entirely.
        mock_reload.reset_mock()
        self.p.config.set_synchronous_standby([])
        mock_reload.assert_called()
        self.assertIsNone(value_in_conf())

    def test_get_server_parameters(self):
        """get_server_parameters() must produce a parameter mapping for both
        synchronous and synchronous-strict modes."""
        config = {
            'synchronous_mode': True,
            'parameters': {
                'wal_level': 'hot_standby'
            },
            'listen': '0'
        }
        self.p.config.get_server_parameters(config)
        config['synchronous_mode_strict'] = True
        self.p.config.get_server_parameters(config)
        # NOTE(review): other call sites pass a list of names; confirm a bare
        # string here is intentional and not meant to be ['foo'].
        self.p.config.set_synchronous_standby('foo')
        self.assertTrue(
            str(self.p.config.get_server_parameters(config)).startswith('{'))

    @patch('time.sleep', Mock())
    def test__wait_for_connection_close(self):
        """stop(on_safepoint=...) must wait for the postmaster to finish and
        survive a query error while polling for the connection to close.
        """
        mock_postmaster = MockPostmaster()
        with patch.object(Postgresql, 'is_running',
                          Mock(return_value=mock_postmaster)):
            # Postmaster reports running once, then gone.
            mock_postmaster.is_running.side_effect = [True, False, False]
            mock_callback = Mock()
            self.p.stop(on_safepoint=mock_callback)

            # Same sequence, but the polling query raises -- must be swallowed.
            mock_postmaster.is_running.side_effect = [True, False, False]
            with patch.object(MockCursor, "execute",
                              Mock(side_effect=psycopg2.Error)):
                self.p.stop(on_safepoint=mock_callback)

    def test_terminate_starting_postmaster(self):
        """A still-starting postmaster gets signalled to stop and waited on."""
        postmaster = MockPostmaster()
        self.p.terminate_starting_postmaster(postmaster)
        postmaster.signal_stop.assert_called()
        postmaster.wait.assert_called()

    def test_replica_cached_timeline(self):
        """replica_cached_timeline(2) yields 3 under the mocked environment."""
        cached = self.p.replica_cached_timeline(2)
        self.assertEqual(cached, 3)

    def test_get_master_timeline(self):
        """get_master_timeline() yields 1 under the mocked environment."""
        timeline = self.p.get_master_timeline()
        self.assertEqual(timeline, 1)

    @patch.object(Postgresql, 'get_postgres_role_from_data_directory',
                  Mock(return_value='replica'))
    def test__build_effective_configuration(self):
        """When controldata reports larger values than the config, start()
        (here cancelled, hence False) must still flag a pending restart."""
        with patch.object(
                Postgresql, 'controldata',
                Mock(
                    return_value={
                        'max_connections setting': '200',
                        'max_worker_processes setting': '20',
                        'max_locks_per_xact setting': '100',
                        'max_wal_senders setting': 10
                    })):
            self.p.cancellable.cancel()
            self.assertFalse(self.p.start())
            self.assertTrue(self.p.pending_restart)

    @patch('os.path.exists', Mock(return_value=True))
    @patch('os.path.isfile', Mock(return_value=False))
    def test_pgpass_is_dir(self):
        """setUp() must raise when the pgpass path exists but is not a file."""
        self.assertRaises(PatroniException, self.setUp)

    @patch.object(Postgresql, '_query', Mock(side_effect=RetryFailedError('')))
    def test_received_timeline(self):
        """A failing query on a standby leader surfaces as a connection error."""
        self.p.set_role('standby_leader')
        self.p.reset_cluster_info_state()
        with self.assertRaises(PostgresConnectionException):
            self.p.received_timeline()

    def test__write_recovery_params(self):
        """_write_recovery_params() must accept version-specific settings:
        pause_at_recovery_target, and recovery_target_action on a 9.4 server."""
        self.p.config._write_recovery_params(
            Mock(), {'pause_at_recovery_target': 'false'})
        with patch.object(Postgresql, 'major_version',
                          PropertyMock(return_value=90400)):
            self.p.config._write_recovery_params(
                Mock(), {'recovery_target_action': 'PROMOTE'})
# Example #13
 def test_is_leader(self):
     """Scraped duplicate of test_is_leader: a failing _query raises."""
     self.assertTrue(self.p.is_leader())
     self.p.reset_cluster_info_state()
     failing_query = Mock(side_effect=RetryFailedError(''))
     with patch.object(Postgresql, '_query', failing_query):
         self.assertRaises(PostgresConnectionException, self.p.is_leader)
 def test_is_leader(self):
     """Scraped variant: reset_cluster_info_state() takes an argument here and
     a failed query makes is_leader() return False instead of raising."""
     self.assertTrue(self.p.is_leader())
     self.p.reset_cluster_info_state(None)
     failing_query = Mock(side_effect=RetryFailedError(''))
     with patch.object(Postgresql, '_query', failing_query):
         self.assertFalse(self.p.is_leader())
# Example #15
 def test_on_role_change(self):
     """on_role_change succeeds normally; a failing retry makes it False."""
     self.assertTrue(self.conn.on_role_change('master'))
     self.conn.retry = Mock(side_effect=RetryFailedError("retry failed"))
     result = self.conn.on_role_change('master')
     self.assertFalse(result)