def _load_cluster(self):
        """Fetch the whole cluster state from Consul KV and cache it in ``self._cluster``.

        A missing key prefix is treated as an uninitialized cluster; any other
        failure is logged and re-raised as :class:`ConsulError`.
        """
        try:
            path = self.client_path('/')
            _, results = self._client.kv.get(path, recurse=True)

            if results is None:
                raise NotFound

            # index nodes by their key relative to the cluster prefix,
            # decoding values to text (a missing value becomes '')
            nodes = {}
            for node in results:
                node['Value'] = (node['Value'] or b'').decode('utf-8')
                nodes[os.path.relpath(node['Key'], path)] = node

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize['Value']

            # get last leader operation (0 when the key is absent)
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(
                last_leader_operation['Value'])

            # get list of members: only direct children of the members/ prefix
            members = [
                self.member(n) for k, n in nodes.items()
                if k.startswith(self._MEMBERS) and k.count('/') == 1
            ]

            # get leader; drop the leader key if it carries our name but its
            # Consul session is not ours (stale leader from a previous session)
            leader = nodes.get(self._LEADER)
            if leader and leader[
                    'Value'] == self._name and self._session != leader.get(
                        'Session', 'x'):
                logger.info(
                    'I am leader but not owner of the session. Removing leader node'
                )
                self._client.kv.delete(self.leader_path,
                                       cas=leader['ModifyIndex'])
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader name is not
                # among the known members
                member = Member(-1, leader['Value'], None, {})
                member = ([m for m in members if m.name == leader['Value']]
                          or [member])[0]
                leader = Leader(leader['ModifyIndex'], leader.get('Session'),
                                member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover['ModifyIndex'],
                                              failover['Value'])

            self._cluster = Cluster(initialize, leader, last_leader_operation,
                                    members, failover)
        except NotFound:
            self._cluster = Cluster(False, None, None, [], None)
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logger.exception('get_cluster')
            raise ConsulError('Consul is not responding properly')
Example #2
0
    def _load_cluster(self):
        """Fetch the cluster state from etcd and cache it in ``self._cluster``.

        A missing key prefix yields an uninitialized Cluster; any other
        failure is logged and re-raised as :class:`EtcdError`.
        """
        try:
            result = self.retry(self._client.read, self.client_path(''), recursive=True)
            # index leaves by their key relative to the cluster prefix
            nodes = {os.path.relpath(node.key, result.key): node for node in result.leaves}

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize.value

            # get last leader operation (0 when the key is absent)
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation.value)

            # get list of members: only direct children of the members/ prefix
            members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]

            # get leader; fall back to a synthetic Member when the leader name
            # is not among the known members
            leader = nodes.get(self._LEADER)
            if leader:
                member = Member(-1, leader.value, None, {})
                member = ([m for m in members if m.name == leader.value] or [member])[0]
                leader = Leader(leader.modifiedIndex, leader.ttl, member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover.modifiedIndex, failover.value)

            self._cluster = Cluster(initialize, leader, last_leader_operation, members, failover)
        except etcd.EtcdKeyNotFound:
            self._cluster = Cluster(False, None, None, [], None)
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logger.exception('get_cluster')
            raise EtcdError('Etcd is not responding properly')
Example #3
0
    def _load_cluster(self):
        """Fetch the whole cluster state from Consul KV and cache it in ``self._cluster``.

        A missing prefix (NotFound) produces an empty Cluster of all-None
        fields; any other failure is re-raised as ConsulError.
        """
        try:
            path = self.client_path('/')
            _, results = self.retry(self._client.kv.get, path, recurse=True)

            if results is None:
                raise NotFound

            # index nodes by key relative to the prefix, decoding values to
            # text (a missing value becomes '')
            nodes = {}
            for node in results:
                node['Value'] = (node['Value'] or b'').decode('utf-8')
                nodes[node['Key'][len(path):].lstrip('/')] = node

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize['Value']

            # get global dynamic configuration
            config = nodes.get(self._CONFIG)
            config = config and ClusterConfig.from_node(config['ModifyIndex'], config['Value'])

            # get timeline history
            history = nodes.get(self._HISTORY)
            history = history and TimelineHistory.from_node(history['ModifyIndex'], history['Value'])

            # get last leader operation (0 when the key is absent)
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation['Value'])

            # get list of members: only direct children of the members/ prefix
            members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]

            # get leader; outside of ctl mode, delete a leader key that names
            # us but belongs to a different Consul session (stale leader)
            leader = nodes.get(self._LEADER)
            if not self._ctl and leader and leader['Value'] == self._name \
                    and self._session != leader.get('Session', 'x'):
                logger.info('I am leader but not owner of the session. Removing leader node')
                self._client.kv.delete(self.leader_path, cas=leader['ModifyIndex'])
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader name is not
                # among the known members
                member = Member(-1, leader['Value'], None, {})
                member = ([m for m in members if m.name == leader['Value']] or [member])[0]
                leader = Leader(leader['ModifyIndex'], leader.get('Session'), member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover['ModifyIndex'], failover['Value'])

            # get synchronization state (built even when the key is absent)
            sync = nodes.get(self._SYNC)
            sync = SyncState.from_node(sync and sync['ModifyIndex'], sync and sync['Value'])

            self._cluster = Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history)
        except NotFound:
            self._cluster = Cluster(None, None, None, None, [], None, None, None)
        except Exception:
            logger.exception('get_cluster')
            raise ConsulError('Consul is not responding properly')
Example #4
0
 def test__ensure_logical_slots_replica(self):
     """Exercise sync_replication_slots on a replica across error and edge cases."""
     self.p.set_role('replica')
     config = ClusterConfig(
         1, {'slots': {
             'ls': {
                 'database': 'a',
                 'plugin': 'b'
             }
         }}, 1)
     cluster = Cluster(True, config, self.leader, 0,
                       [self.me, self.other, self.leadermem], None, None,
                       None, {'ls': 12346})
     self.assertEqual(self.s.sync_replication_slots(cluster, False), [])
     self.s._schedule_load_slots = False
     # a failing execute() must not propagate and must yield no slots
     with patch.object(MockCursor, 'execute',
                       Mock(side_effect=psycopg2.OperationalError)):
         self.assertEqual(self.s.sync_replication_slots(cluster, False), [])
     # non-integer slot position in the cluster view still yields no slots
     cluster.slots['ls'] = 'a'
     self.assertEqual(self.s.sync_replication_slots(cluster, False), [])
     # with rowcount=1 on the mocked cursor the slot 'ls' is reported
     with patch.object(MockCursor,
                       'rowcount',
                       PropertyMock(return_value=1),
                       create=True):
         self.assertEqual(self.s.sync_replication_slots(cluster, False),
                          ['ls'])
Example #5
0
    def _load_cluster(self):
        """Fetch the cluster state from etcd and cache it in ``self._cluster``.

        A missing key prefix yields an uninitialized Cluster; any other
        failure is logged and re-raised as :class:`EtcdError`.
        """
        try:
            result = self.retry(self._client.read,
                                self.client_path(''),
                                recursive=True)
            # index leaves by their key relative to the cluster prefix
            nodes = {
                os.path.relpath(node.key, result.key): node
                for node in result.leaves
            }

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize.value

            # get global dynamic configuration
            config = nodes.get(self._CONFIG)
            config = config and ClusterConfig.from_node(
                config.modifiedIndex, config.value)

            # get last leader operation (0 when the key is absent)
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(
                last_leader_operation.value)

            # get list of members: only direct children of the members/ prefix
            members = [
                self.member(n) for k, n in nodes.items()
                if k.startswith(self._MEMBERS) and k.count('/') == 1
            ]

            # get leader; fall back to a synthetic Member when the leader
            # name is not among the known members
            leader = nodes.get(self._LEADER)
            if leader:
                member = Member(-1, leader.value, None, {})
                member = ([m for m in members if m.name == leader.value]
                          or [member])[0]
                # use the global etcd index unless the leader key was
                # modified after it, in which case advance past that index
                index = result.etcd_index if result.etcd_index > leader.modifiedIndex else leader.modifiedIndex + 1
                leader = Leader(index, leader.ttl, member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover.modifiedIndex,
                                              failover.value)

            # get synchronization state (built even when the key is absent)
            sync = nodes.get(self._SYNC)
            sync = SyncState.from_node(sync and sync.modifiedIndex, sync
                                       and sync.value)

            self._cluster = Cluster(initialize, config, leader,
                                    last_leader_operation, members, failover,
                                    sync)
        except etcd.EtcdKeyNotFound:
            self._cluster = Cluster(None, None, None, None, [], None, None)
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logger.exception('get_cluster')
            raise EtcdError('Etcd is not responding properly')
Example #6
0
    def _load_cluster(self):
        """Read the whole KV prefix from the Raft store and assemble a Cluster.

        Returns an all-None/empty Cluster when the prefix has no keys.
        """
        prefix = self.client_path('')
        response = self._sync_obj.get(prefix, recursive=True)
        if not response:
            return Cluster(None, None, None, None, [], None, None, None)

        # index entries by key relative to the prefix; normalize separators
        kv = {}
        for full_key, item in response.items():
            kv[os.path.relpath(full_key, prefix).replace('\\', '/')] = item

        # initialize flag
        initialize_node = kv.get(self._INITIALIZE)
        initialize = initialize_node and initialize_node['value']

        # global dynamic configuration
        config_node = kv.get(self._CONFIG)
        config = config_node and ClusterConfig.from_node(config_node['index'],
                                                         config_node['value'])

        # timeline history
        history_node = kv.get(self._HISTORY)
        history = history_node and TimelineHistory.from_node(
            history_node['index'], history_node['value'])

        # last leader operation (0 when the key is absent)
        optime_node = kv.get(self._LEADER_OPTIME)
        last_leader_operation = int(optime_node['value']) if optime_node is not None else 0

        # members: only direct children of the members/ prefix
        members = []
        for key, item in kv.items():
            if key.startswith(self._MEMBERS) and key.count('/') == 1:
                members.append(self.member(key, item))

        # leader: prefer the matching Member object, else a synthetic one
        leader = kv.get(self._LEADER)
        if leader:
            candidates = [m for m in members if m.name == leader['value']]
            leader_member = candidates[0] if candidates else Member(
                -1, leader['value'], None, {})
            leader = Leader(leader['index'], None, leader_member)

        # failover key
        failover = kv.get(self._FAILOVER)
        if failover:
            failover = Failover.from_node(failover['index'], failover['value'])

        # synchronization state (built even when the key is absent)
        sync_node = kv.get(self._SYNC)
        sync = SyncState.from_node(sync_node and sync_node['index'],
                                   sync_node and sync_node['value'])

        return Cluster(initialize, config, leader, last_leader_operation,
                       members, failover, sync, history)
 def test_sync_replication_slots(self):
     """sync_replication_slots runs on a started instance and survives query errors."""
     self.p.start()
     cluster = Cluster(True, self.leader, 0, [self.me, self.other, self.leadermem], None)
     self.p.sync_replication_slots(cluster)
     # a failing query must not raise out of sync_replication_slots
     self.p.query = Mock(side_effect=psycopg2.OperationalError)
     self.p.schedule_load_slots = True
     self.p.sync_replication_slots(cluster)
Example #8
0
 def test_sync_replication_slots(self):
     """Exercise sync_replication_slots across roles, query failures, and
     invalid permanent slot names (which must be reported via logger.error)."""
     config = ClusterConfig(1, {'slots': {'test_3': {'database': 'a', 'plugin': 'b'},
                                          'A': 0, 'ls': 0, 'b': {'type': 'logical', 'plugin': '1'}},
                                'ignore_slots': [{'name': 'blabla'}]}, 1)
     cluster = Cluster(True, config, self.leader, 0,
                       [self.me, self.other, self.leadermem], None, None, None, {'test_3': 10})
     # a failing query must not raise out of sync_replication_slots
     with mock.patch('patroni.postgresql.Postgresql._query', Mock(side_effect=psycopg.OperationalError)):
         self.s.sync_replication_slots(cluster, False)
     # run the sync under several role combinations
     self.p.set_role('standby_leader')
     self.s.sync_replication_slots(cluster, False)
     self.p.set_role('replica')
     with patch.object(Postgresql, 'is_leader', Mock(return_value=False)):
         self.s.sync_replication_slots(cluster, False)
     self.p.set_role('master')
     with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')):
         self.s.sync_replication_slots(cluster, False)
     with patch.object(SlotsHandler, 'drop_replication_slot', Mock(return_value=True)),\
             patch('patroni.dcs.logger.error', new_callable=Mock()) as errorlog_mock:
         # members whose names collide after slot-name mangling ('-' and '.')
         alias1 = Member(0, 'test-3', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5436/postgres'})
         alias2 = Member(0, 'test.3', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5436/postgres'})
         cluster.members.extend([alias1, alias2])
         self.s.sync_replication_slots(cluster, False)
         self.assertEqual(errorlog_mock.call_count, 5)
         ca = errorlog_mock.call_args_list[0][0][1]
         self.assertTrue("test-3" in ca, "non matching {0}".format(ca))
         self.assertTrue("test.3" in ca, "non matching {0}".format(ca))
         with patch.object(Postgresql, 'major_version', PropertyMock(return_value=90618)):
             self.s.sync_replication_slots(cluster, False)
Example #9
0
 def setUp(self):
     """Start the Postgresql fixture and build a cluster carrying one
     permanent logical slot ('ls') with a known confirmed position."""
     super(TestSlotsHandler, self).setUp()
     self.s = self.p.slots_handler
     self.p.start()
     slot_config = ClusterConfig(
         1, {'slots': {'ls': {'database': 'a', 'plugin': 'b'}}}, 1)
     members = [self.me, self.other, self.leadermem]
     self.cluster = Cluster(True, slot_config, self.leader, 0, members,
                            None, None, None, {'ls': 12345})
Example #10
0
 def test_sync_replication_slots(self):
     """sync_replication_slots tolerates query errors and role changes, and
     reports conflicting member names (after '-'/'.' mangling) via logger.error."""
     self.p.start()
     cluster = Cluster(True, None, self.leader, 0,
                       [self.me, self.other, self.leadermem], None, None)
     # a failing query must not raise out of sync_replication_slots
     with mock.patch('patroni.postgresql.Postgresql._query',
                     Mock(side_effect=psycopg2.OperationalError)):
         self.p.sync_replication_slots(cluster)
     self.p.sync_replication_slots(cluster)
     with mock.patch('patroni.postgresql.Postgresql.role',
                     new_callable=PropertyMock(return_value='replica')):
         self.p.sync_replication_slots(cluster)
     with mock.patch('patroni.postgresql.logger.error',
                     new_callable=Mock()) as errorlog_mock:
         self.p.query = Mock()
         # two members whose names collide once mangled into slot names
         alias1 = Member(
             0, 'test-3', 28, {
                 'conn_url':
                 'postgres://*****:*****@127.0.0.1:5436/postgres'
             })
         alias2 = Member(
             0, 'test.3', 28, {
                 'conn_url':
                 'postgres://*****:*****@127.0.0.1:5436/postgres'
             })
         cluster.members.extend([alias1, alias2])
         self.p.sync_replication_slots(cluster)
         errorlog_mock.assert_called_once()
         assert "test-3" in errorlog_mock.call_args[0][1]
         assert "test.3" in errorlog_mock.call_args[0][1]
Example #11
0
    def test_pick_sync_standby(self):
        """pick_synchronous_standby prefers the current 'sync' standby, then a
        'potential' one, then any streaming member, and returns (None, False)
        when nothing is streaming.

        Note: the deprecated ``assertEquals`` alias (removed in Python 3.12)
        was replaced with ``assertEqual``.
        """
        cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None,
                          SyncState(0, self.me.name, self.leadermem.name))

        # current sync standby is streaming -> keep it, already sync
        with patch.object(Postgresql, "query", return_value=[
                    (self.leadermem.name, 'streaming', 'sync'),
                    (self.me.name, 'streaming', 'async'),
                    (self.other.name, 'streaming', 'async'),
                ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, True))

        # 'potential' state -> same pick, but not yet sync
        with patch.object(Postgresql, "query", return_value=[
                    (self.me.name, 'streaming', 'async'),
                    (self.leadermem.name, 'streaming', 'potential'),
                    (self.other.name, 'streaming', 'async'),
                ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.leadermem.name, False))

        # sync candidate gone -> first streaming member wins
        with patch.object(Postgresql, "query", return_value=[
                    (self.me.name, 'streaming', 'async'),
                    (self.other.name, 'streaming', 'async'),
                ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))

        # a 'sync' row for an unknown member is ignored
        with patch.object(Postgresql, "query", return_value=[
                    ('missing', 'streaming', 'sync'),
                    (self.me.name, 'streaming', 'async'),
                    (self.other.name, 'streaming', 'async'),
                ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster), (self.me.name, False))

        # nothing streaming at all
        with patch.object(Postgresql, "query", return_value=[]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster), (None, False))
Example #12
0
 def test_check_logical_slots_readiness(self):
     """check_logical_slots_readiness runs after slots were copied from the leader."""
     self.s.copy_logical_slots(self.leader, ['ls'])
     config = ClusterConfig(1, {'slots': {'ls': {'database': 'a', 'plugin': 'b'}}}, 1)
     cluster = Cluster(True, config, self.leader, 0,
                       [self.me, self.other, self.leadermem], None, None, None, {'ls': 12345})
     self.assertEqual(self.s.sync_replication_slots(cluster, False), [])
     # rowcount=1 makes the mocked cursor report a row — presumably the
     # readiness check needs the slot to be visible; confirm against MockCursor
     with patch.object(MockCursor, 'rowcount', PropertyMock(return_value=1), create=True):
         self.s.check_logical_slots_readiness(cluster, False, None)
Example #13
0
    def _inner_load_cluster(self):
        """Assemble cluster state from ZooKeeper znodes and cache it in ``self._cluster``.

        Sets ``self._fetch_cluster`` when the view is incomplete (no znodes,
        or the leader is not among the loaded members) so a full reload is
        scheduled on the next cycle.
        """
        self._fetch_cluster = False
        self.event.clear()
        nodes = set(
            self.get_children(self.client_path(''), self.cluster_watcher))
        if not nodes:
            self._fetch_cluster = True

        # get initialize flag
        initialize = (self.get_node(self.initialize_path)
                      or [None])[0] if self._INITIALIZE in nodes else None

        # get global dynamic configuration
        config = self.get_node(
            self.config_path,
            watch=self.cluster_watcher) if self._CONFIG in nodes else None
        config = config and ClusterConfig.from_node(config[1].version,
                                                    config[0], config[1].mzxid)

        # get last leader operation (only read when a reload is pending)
        last_leader_operation = self._OPTIME in nodes and self._fetch_cluster and self.get_node(
            self.leader_optime_path)
        last_leader_operation = last_leader_operation and int(
            last_leader_operation[0]) or 0

        # get list of members
        members = self.load_members() if self._MEMBERS[:-1] in nodes else []

        # get leader; outside ctl mode, delete a leader znode that carries our
        # name but belongs to a different ZooKeeper session (stale leader)
        leader = self.get_node(
            self.leader_path) if self._LEADER in nodes else None
        if leader:
            client_id = self._client.client_id
            if not self._ctl and leader[0] == self._name and client_id is not None \
                    and client_id[0] != leader[1].ephemeralOwner:
                logger.info(
                    'I am leader but not owner of the session. Removing leader node'
                )
                self._client.delete(self.leader_path)
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader name is not
                # among the known members; that also forces a reload
                member = Member(-1, leader[0], None, {})
                member = ([m for m in members if m.name == leader[0]]
                          or [member])[0]
                leader = Leader(leader[1].version, leader[1].ephemeralOwner,
                                member)
                self._fetch_cluster = member.index == -1

        # failover key
        failover = self.get_node(
            self.failover_path,
            watch=self.cluster_watcher) if self._FAILOVER in nodes else None
        failover = failover and Failover.from_node(failover[1].version,
                                                   failover[0])

        self._cluster = Cluster(initialize, config, leader,
                                last_leader_operation, members, failover)
Example #14
0
 def test_sync_replication_slots(self):
     """sync_replication_slots tolerates query errors and a replica role."""
     self.p.start()
     cluster = Cluster(True, None, self.leader, 0, [self.me, self.other, self.leadermem], None)
     self.p.sync_replication_slots(cluster)
     # a failing query must not raise out of sync_replication_slots
     self.p.query = Mock(side_effect=psycopg2.OperationalError)
     self.p.schedule_load_slots = True
     self.p.sync_replication_slots(cluster)
     self.p.schedule_load_slots = False
     with mock.patch('patroni.postgresql.Postgresql.role', new_callable=PropertyMock(return_value='replica')):
         self.p.sync_replication_slots(cluster)
Example #15
0
    def test_pick_sync_standby(self):
        """pick_synchronous_standby returns (candidate_names, sync_names):
        a 'sync' row keeps the member in both lists, 'potential' only in the
        candidates, and an empty result yields ([], [])."""
        cluster = Cluster(True, None, self.leader, 0,
                          [self.me, self.other, self.leadermem], None,
                          SyncState(0, self.me.name,
                                    self.leadermem.name), None)
        # first query result: synchronous_commit level probe
        mock_cursor = Mock()
        mock_cursor.fetchone.return_value = ('remote_apply', )

        # current sync standby still in 'sync' state
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [(self.leadermem.name, 'sync', 1),
                               (self.me.name, 'async', 2),
                               (self.other.name, 'async', 2)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.leadermem.name], [self.leadermem.name]))

        # 'potential' state: candidate but not yet sync
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [(self.leadermem.name, 'potential', 1),
                               (self.me.name, 'async', 2),
                               (self.other.name, 'async', 2)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.leadermem.name], []))

        # previous sync standby gone: first async member becomes candidate
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [(self.me.name, 'async', 1),
                               (self.other.name, 'async', 2)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.me.name], []))

        # a 'sync' row for an unknown member is ignored
        with patch.object(Postgresql,
                          "query",
                          side_effect=[
                              mock_cursor,
                              [('missing', 'sync', 1),
                               (self.me.name, 'async', 2),
                               (self.other.name, 'async', 3)]
                          ]):
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([self.me.name], []))

        # no rows at all (and an old major version)
        with patch.object(Postgresql, "query", side_effect=[mock_cursor, []]):
            self.p._major_version = 90400
            self.assertEqual(self.p.pick_synchronous_standby(cluster),
                             ([], []))
Example #16
0
    def _inner_load_cluster(self):
        """Assemble and return a Cluster from ZooKeeper znodes.

        Sets ``self._fetch_cluster`` when the view is incomplete (no znodes,
        or the leader is not among the loaded members) so a full reload is
        scheduled on the next cycle.
        """
        self._fetch_cluster = False
        self.event.clear()
        nodes = set(self.get_children(self.client_path(''), self.cluster_watcher))
        if not nodes:
            self._fetch_cluster = True

        # get initialize flag
        initialize = (self.get_node(self.initialize_path) or [None])[0] if self._INITIALIZE in nodes else None

        # get global dynamic configuration
        config = self.get_node(self.config_path, watch=self.cluster_watcher) if self._CONFIG in nodes else None
        config = config and ClusterConfig.from_node(config[1].version, config[0], config[1].mzxid)

        # get timeline history
        history = self.get_node(self.history_path, watch=self.cluster_watcher) if self._HISTORY in nodes else None
        history = history and TimelineHistory.from_node(history[1].mzxid, history[0])

        # get synchronization state (built even when the znode is absent)
        sync = self.get_node(self.sync_path, watch=self.cluster_watcher) if self._SYNC in nodes else None
        sync = SyncState.from_node(sync and sync[1].version, sync and sync[0])

        # get list of members; pass the sync standbys only when we are the
        # sync-state leader
        sync_standby = sync.leader == self._name and sync.members or []
        members = self.load_members(sync_standby) if self._MEMBERS[:-1] in nodes else []

        # get leader; outside ctl mode, delete a leader znode that carries our
        # name but belongs to a different ZooKeeper session (stale leader)
        leader = self.get_node(self.leader_path) if self._LEADER in nodes else None
        if leader:
            client_id = self._client.client_id
            if not self._ctl and leader[0] == self._name and client_id is not None \
                    and client_id[0] != leader[1].ephemeralOwner:
                logger.info('I am leader but not owner of the session. Removing leader node')
                self._client.delete(self.leader_path)
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader name is not
                # among the known members; that also forces a reload
                member = Member(-1, leader[0], None, {})
                member = ([m for m in members if m.name == leader[0]] or [member])[0]
                leader = Leader(leader[1].version, leader[1].ephemeralOwner, member)
                self._fetch_cluster = member.index == -1

        # get last leader operation
        last_leader_operation = self._OPTIME in nodes and self.get_leader_optime(leader)

        # failover key
        failover = self.get_node(self.failover_path, watch=self.cluster_watcher) if self._FAILOVER in nodes else None
        failover = failover and Failover.from_node(failover[1].version, failover[0])

        return Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history)
    def _inner_load_cluster(self):
        """Assemble cluster state from ZooKeeper znodes and cache it in ``self._cluster``.

        Sets ``self._fetch_cluster`` when the view is incomplete (no znodes,
        or the leader is not among the loaded members) so a full reload is
        scheduled on the next cycle.
        """
        self._fetch_cluster = False
        self.event.clear()
        nodes = set(
            self.get_children(self.client_path(''), self.cluster_watcher))
        if not nodes:
            self._fetch_cluster = True

        # get initialize flag
        initialize = (self.get_node(self.initialize_path)
                      or [None])[0] if self._INITIALIZE in nodes else None

        # get list of members
        members = self.load_members() if self._MEMBERS[:-1] in nodes else []

        # get leader; delete a leader znode that carries our name but belongs
        # to a different ZooKeeper session (stale leader from a prior session)
        leader = self.get_node(
            self.leader_path) if self._LEADER in nodes else None
        if leader:
            client_id = self._client.client_id
            if leader[0] == self._name and client_id is not None and client_id[
                    0] != leader[1].ephemeralOwner:
                logger.info(
                    'I am leader but not owner of the session. Removing leader node'
                )
                self._client.delete(self.leader_path)
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader name is not
                # among the known members; that also forces a reload
                member = Member(-1, leader[0], None, {})
                member = ([m for m in members if m.name == leader[0]]
                          or [member])[0]
                leader = Leader(leader[1].version, leader[1].ephemeralOwner,
                                member)
                self._fetch_cluster = member.index == -1

        # failover key
        failover = self.get_node(
            self.failover_path,
            watch=self.cluster_watcher) if self._FAILOVER in nodes else None
        if failover:
            failover = Failover.from_node(failover[1].version, failover[0])

        # get last leader operation (only read when a reload is pending)
        optime = self.get_node(
            self.leader_optime_path
        ) if self._OPTIME in nodes and self._fetch_cluster else None
        self._last_leader_operation = 0 if optime is None else int(optime[0])
        self._cluster = Cluster(initialize, leader,
                                self._last_leader_operation, members, failover)
Example #18
0
 def _load_cluster(self):
     """Return the cached cluster, reloading it from ZooKeeper when needed.

     A full reload happens when a reload is pending or nothing is cached; a
     reload failure becomes a ZooKeeperError. Otherwise, only the leader
     optime is refreshed (best-effort) when it changed or is missing and we
     are not the leader ourselves.
     """
     cluster = self.cluster
     if self._fetch_cluster or cluster is None:
         try:
             cluster = self._client.retry(self._inner_load_cluster)
         except Exception:
             logger.exception('get_cluster')
             self.cluster_watcher(None)
             # message fixed: was 'ZooKeeper in not responding properly'
             raise ZooKeeperError('ZooKeeper is not responding properly')
     # Optime ZNode was updated or doesn't exist and we are not leader
     elif (self._fetch_optime and not self._fetch_cluster or not cluster.last_leader_operation) and\
             not (cluster.leader and cluster.leader.name == self._name):
         try:
             optime = self.get_leader_optime(cluster.leader)
             cluster = Cluster(cluster.initialize, cluster.config, cluster.leader, optime,
                               cluster.members, cluster.failover, cluster.sync, cluster.history)
         except Exception:
             # deliberate best-effort: keep the stale optime on failure
             pass
     return cluster
Example #19
0
    def test_process_permanent_slots(self):
        """slots() reports a permanent logical slot only when the reported
        datoid matches; a mismatching datoid yields an empty mapping."""
        config = ClusterConfig(
            1, {'slots': {'ls': {'database': 'a', 'plugin': 'b'}},
                'ignore_slots': [{'name': 'blabla'}]}, 1)
        cluster = Cluster(True, config, self.leader, 0,
                          [self.me, self.other, self.leadermem], None, None,
                          None, None)

        self.s.sync_replication_slots(cluster, False)
        with patch.object(Postgresql, '_query') as mock_query:
            def cluster_info_row(datoid):
                # one fake cluster-info row carrying a single 'ls' slot entry
                return (1, 0, 0, 0, 0, 0, 0, 0, 0, [{
                    "slot_name": "ls",
                    "type": "logical",
                    "datoid": datoid,
                    "plugin": "b",
                    "confirmed_flush_lsn": 12345,
                    "catalog_xmin": 105
                }])

            # matching datoid -> slot is reported with its position
            self.p.reset_cluster_info_state(None)
            mock_query.return_value.fetchone.return_value = cluster_info_row(5)
            self.assertEqual(self.p.slots(), {'ls': 12345})

            # mismatching datoid -> slot is dropped from the result
            self.p.reset_cluster_info_state(None)
            mock_query.return_value.fetchone.return_value = cluster_info_row(6)
            self.assertEqual(self.p.slots(), {})
Example #20
0
def get_cluster(initialize, leader, members, failover, sync):
    """Build a Cluster fixture with a fixed dummy config and an optime of 10."""
    config = ClusterConfig(1, {1: 2}, 1)
    return Cluster(initialize, config, leader, 10, members, failover, sync)
Example #21
0
def get_cluster(initialize, leader, members, failover):
    """Build a Cluster test fixture with a fixed last-leader optime of 10."""
    cluster_args = (initialize, leader, 10, members, failover)
    return Cluster(*cluster_args)
Example #22
0
    def _load_cluster(self):
        """Rebuild ``self._cluster`` from the current Kubernetes API state.

        Lists the member pods and the config/leader/failover/sync objects
        (all filtered by ``self._label_selector``) and assembles them into a
        ``Cluster`` tuple.  Also updates the leader-observation side state
        (``_leader_resource_version``, ``_leader_observed_subsets``,
        ``_leader_observed_record``, ``_leader_observed_time``).

        Raises ``KubernetesError`` when any API call or parsing step fails.
        """
        try:
            # get list of members
            response = self.retry(self._api.list_namespaced_pod,
                                  self._namespace,
                                  label_selector=self._label_selector)
            members = [self.member(pod) for pod in response.items]

            # index the config/leader/failover/sync objects by their names
            response = self.retry(self._api.list_namespaced_kind,
                                  self._namespace,
                                  label_selector=self._label_selector)
            nodes = {item.metadata.name: item for item in response.items}

            config = nodes.get(self.config_path)
            metadata = config and config.metadata
            # falls back to {} when the object or its annotations are absent
            annotations = metadata and metadata.annotations or {}

            # get initialize flag
            initialize = annotations.get(self._INITIALIZE)

            # get global dynamic configuration
            config = ClusterConfig.from_node(
                metadata and metadata.resource_version,
                annotations.get(self._CONFIG) or '{}')

            # get timeline history
            history = TimelineHistory.from_node(
                metadata and metadata.resource_version,
                annotations.get(self._HISTORY) or '[]')

            # from here on, `metadata`/`annotations` refer to the leader object
            leader = nodes.get(self.leader_path)
            metadata = leader and leader.metadata
            self._leader_resource_version = metadata.resource_version if metadata else None
            self._leader_observed_subsets = leader.subsets if self.__subsets and leader else []
            annotations = metadata and metadata.annotations or {}

            # get last leader operation
            last_leader_operation = annotations.get(self._OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(
                last_leader_operation)

            # get leader
            leader_record = {
                n: annotations.get(n)
                for n in (self._LEADER, 'acquireTime', 'ttl', 'renewTime',
                          'transitions') if n in annotations
            }
            # remember when the leader record last changed, so staleness can
            # be judged against a locally observed timestamp
            if (leader_record or self._leader_observed_record
                ) and leader_record != self._leader_observed_record:
                self._leader_observed_record = leader_record
                self._leader_observed_time = time.time()

            leader = leader_record.get(self._LEADER)
            # a missing or zero 'ttl' annotation falls back to the configured ttl
            try:
                ttl = int(leader_record.get('ttl')) or self._ttl
            except (TypeError, ValueError):
                ttl = self._ttl

            # treat the leader as gone when the record was never observed or
            # has not been refreshed within the ttl window
            if not metadata or not self._leader_observed_time or self._leader_observed_time + ttl < time.time(
            ):
                leader = None

            if metadata:
                member = Member(-1, leader, None, {})
                member = ([m for m in members if m.name == leader]
                          or [member])[0]
                # NOTE(review): `response` here is the kind-list response, so
                # this is the list's resource_version, not the leader
                # object's — confirm this is intended
                leader = Leader(response.metadata.resource_version, None,
                                member)

            # failover key
            failover = nodes.get(self.failover_path)
            metadata = failover and failover.metadata
            failover = Failover.from_node(
                metadata and metadata.resource_version, metadata
                and metadata.annotations)

            # get synchronization state
            sync = nodes.get(self.sync_path)
            metadata = sync and sync.metadata
            sync = SyncState.from_node(metadata and metadata.resource_version,
                                       metadata and metadata.annotations)

            self._cluster = Cluster(initialize, config, leader,
                                    last_leader_operation, members, failover,
                                    sync, history)
        except Exception:
            logger.exception('get_cluster')
            raise KubernetesError('Kubernetes API is not responding properly')
Example #23
0
def get_cluster(initialize, leader, members, failover, sync):
    """Build a Cluster test fixture carrying a one-entry timeline history."""
    timeline_entry = (1, 67197376, 'no recovery target specified', datetime.datetime.now().isoformat())
    history = TimelineHistory(1, [timeline_entry])
    config = ClusterConfig(1, {1: 2}, 1)
    return Cluster(initialize, config, leader, 10, members, failover, sync, history)