def _load_cluster(self):
        """Reload cluster state from Consul KV into ``self._cluster``.

        Performs a single recursive get of the cluster prefix, then extracts
        the initialize flag, last leader operation, member list, leader and
        failover keys.  A missing prefix yields an empty Cluster; any other
        failure raises ConsulError.
        """
        try:
            path = self.client_path('/')
            _, results = self._client.kv.get(path, recurse=True)

            if results is None:
                raise NotFound

            # index every KV node by its key relative to the cluster prefix
            nodes = {}
            for node in results:
                node['Value'] = (node['Value'] or b'').decode('utf-8')
                nodes[os.path.relpath(node['Key'], path)] = node

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize['Value']

            # get last leader operation
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(
                last_leader_operation['Value'])

            # get list of members (direct children of the members/ prefix only)
            members = [
                self.member(n) for k, n in nodes.items()
                if k.startswith(self._MEMBERS) and k.count('/') == 1
            ]

            # get leader; a leader key carrying our name but a session we no
            # longer own is stale and must be removed (CAS-guarded delete)
            leader = nodes.get(self._LEADER)
            if leader and leader[
                    'Value'] == self._name and self._session != leader.get(
                        'Session', 'x'):
                logger.info(
                    'I am leader but not owner of the session. Removing leader node'
                )
                self._client.kv.delete(self.leader_path,
                                       cas=leader['ModifyIndex'])
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader has no member key
                member = Member(-1, leader['Value'], None, {})
                member = ([m for m in members if m.name == leader['Value']]
                          or [member])[0]
                leader = Leader(leader['ModifyIndex'], leader.get('Session'),
                                member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover['ModifyIndex'],
                                              failover['Value'])

            self._cluster = Cluster(initialize, leader, last_leader_operation,
                                    members, failover)
        except NotFound:
            self._cluster = Cluster(False, None, None, [], None)
        except Exception:
            # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # propagate instead of being reported as a Consul failure
            logger.exception('get_cluster')
            raise ConsulError('Consul is not responding properly')
Example #2
0
def get_cluster_initialized_without_leader(leader=False, failover=None):
    """Build an initialized two-member test Cluster.

    A Leader wrapping the first member is attached only when *leader* is
    truthy; *failover* is passed through to the Cluster unchanged.
    """
    m1 = Member(0, 'leader', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5435/postgres',
                                  'api_url': 'http://127.0.0.1:8008/patroni', 'xlog_location': 4})
    # renamed from the ambiguous single-letter ``l`` (PEP 8 / E741)
    cluster_leader = Leader(0, 0, m1) if leader else None
    m2 = Member(0, 'other', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5436/postgres',
                                 'api_url': 'http://127.0.0.1:8011/patroni', 'tags': {'clonefrom': True}})
    return get_cluster(True, cluster_leader, [m1, m2], failover)
Example #3
0
    def _load_cluster(self):
        """Reload cluster state from Consul KV into ``self._cluster``.

        Reads the whole cluster prefix in one recursive get (with retries),
        then extracts the initialize flag, dynamic configuration, timeline
        history, leader optime, member list, leader, failover and sync keys.
        Raises ConsulError on any unexpected failure.
        """
        try:
            path = self.client_path('/')
            _, results = self.retry(self._client.kv.get, path, recurse=True)

            if results is None:
                raise NotFound

            # index every KV node by its key relative to the cluster prefix
            nodes = {}
            for node in results:
                node['Value'] = (node['Value'] or b'').decode('utf-8')
                nodes[node['Key'][len(path):].lstrip('/')] = node

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize['Value']

            # get global dynamic configuration
            config = nodes.get(self._CONFIG)
            config = config and ClusterConfig.from_node(config['ModifyIndex'], config['Value'])

            # get timeline history
            history = nodes.get(self._HISTORY)
            history = history and TimelineHistory.from_node(history['ModifyIndex'], history['Value'])

            # get last leader operation
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation['Value'])

            # get list of members (direct children of the members/ prefix only)
            members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]

            # get leader; a leader key carrying our name but a session we no
            # longer own is stale and must be removed (CAS-guarded delete)
            leader = nodes.get(self._LEADER)
            if not self._ctl and leader and leader['Value'] == self._name \
                    and self._session != leader.get('Session', 'x'):
                logger.info('I am leader but not owner of the session. Removing leader node')
                self._client.kv.delete(self.leader_path, cas=leader['ModifyIndex'])
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader has no member key
                member = Member(-1, leader['Value'], None, {})
                member = ([m for m in members if m.name == leader['Value']] or [member])[0]
                leader = Leader(leader['ModifyIndex'], leader.get('Session'), member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover['ModifyIndex'], failover['Value'])

            # get synchronization state
            sync = nodes.get(self._SYNC)
            sync = SyncState.from_node(sync and sync['ModifyIndex'], sync and sync['Value'])

            self._cluster = Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history)
        except NotFound:
            self._cluster = Cluster(None, None, None, None, [], None, None, None)
        except Exception:
            logger.exception('get_cluster')
            raise ConsulError('Consul is not responding properly')
Example #4
0
    def _load_cluster(self):
        """Reload cluster state from etcd into ``self._cluster``.

        Reads the whole cluster prefix recursively (with retries) and builds
        a Cluster from the initialize, optime, members, leader and failover
        keys.  A missing prefix yields an empty Cluster; any other failure
        raises EtcdError.
        """
        try:
            result = self.retry(self._client.read, self.client_path(''), recursive=True)
            # index leaves by key relative to the cluster prefix
            nodes = {os.path.relpath(node.key, result.key): node for node in result.leaves}

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize.value

            # get last leader operation
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation.value)

            # get list of members (direct children of the members/ prefix only)
            members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]

            # get leader
            leader = nodes.get(self._LEADER)
            if leader:
                # fall back to a synthetic Member when the leader has no member key
                member = Member(-1, leader.value, None, {})
                member = ([m for m in members if m.name == leader.value] or [member])[0]
                leader = Leader(leader.modifiedIndex, leader.ttl, member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover.modifiedIndex, failover.value)

            self._cluster = Cluster(initialize, leader, last_leader_operation, members, failover)
        except etcd.EtcdKeyNotFound:
            self._cluster = Cluster(False, None, None, [], None)
        except Exception:
            # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # propagate instead of being reported as an etcd failure
            logger.exception('get_cluster')
            raise EtcdError('Etcd is not responding properly')
Example #5
0
def get_cluster_initialized_without_leader(leader=False,
                                           failover=None,
                                           sync=None):
    """Build an initialized two-member test Cluster.

    A Leader wrapping the first member is attached only when *leader* is
    truthy; *sync*, when given, is a pair feeding the SyncState.
    """
    member_one = Member(
        0, 'leader', 28, {
            'conn_url': 'postgres://*****:*****@127.0.0.1:5435/postgres',
            'api_url': 'http://127.0.0.1:8008/patroni',
            'xlog_location': 4
        })
    if leader:
        leader = Leader(0, 0, member_one)
    else:
        leader = None
    member_two = Member(
        0, 'other', 28, {
            'conn_url': 'postgres://*****:*****@127.0.0.1:5436/postgres',
            'api_url': 'http://127.0.0.1:8011/patroni',
            'state': 'running',
            'tags': {'clonefrom': True},
            'scheduled_restart': {
                'schedule': "2100-01-01 10:53:07.560445+00:00",
                'postgres_version': '99.0.0'
            }
        })
    syncstate = SyncState(0 if sync else None,
                          sync and sync[0],
                          sync and sync[1])
    return get_cluster(True, leader, [member_one, member_two], failover,
                       syncstate)
Example #6
0
    def _inner_load_cluster(self):
        """Reload cluster state from ZooKeeper into ``self._cluster``.

        Lists the children of the cluster prefix (installing watchers), then
        fetches each known node.  Sets ``self._fetch_cluster`` when the view
        is incomplete so a full refetch happens later.
        """
        self._fetch_cluster = False
        self.event.clear()
        nodes = set(
            self.get_children(self.client_path(''), self.cluster_watcher))
        if not nodes:
            # empty prefix: force a refetch on the next cycle
            self._fetch_cluster = True

        # get initialize flag
        initialize = (self.get_node(self.initialize_path)
                      or [None])[0] if self._INITIALIZE in nodes else None

        # get global dynamic configuration
        config = self.get_node(
            self.config_path,
            watch=self.cluster_watcher) if self._CONFIG in nodes else None
        config = config and ClusterConfig.from_node(config[1].version,
                                                    config[0], config[1].mzxid)

        # get last leader operation; only fetched when a refetch is already
        # pending (NOTE(review): presumably it is picked up elsewhere otherwise)
        last_leader_operation = self._OPTIME in nodes and self._fetch_cluster and self.get_node(
            self.leader_optime_path)
        last_leader_operation = last_leader_operation and int(
            last_leader_operation[0]) or 0

        # get list of members
        members = self.load_members() if self._MEMBERS[:-1] in nodes else []

        # get leader; a leader node carrying our name but owned by a different
        # session is stale and must be removed
        leader = self.get_node(
            self.leader_path) if self._LEADER in nodes else None
        if leader:
            client_id = self._client.client_id
            if not self._ctl and leader[0] == self._name and client_id is not None \
                    and client_id[0] != leader[1].ephemeralOwner:
                logger.info(
                    'I am leader but not owner of the session. Removing leader node'
                )
                self._client.delete(self.leader_path)
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader has no member node
                member = Member(-1, leader[0], None, {})
                member = ([m for m in members if m.name == leader[0]]
                          or [member])[0]
                leader = Leader(leader[1].version, leader[1].ephemeralOwner,
                                member)
                # a synthetic leader member means our member list is stale
                self._fetch_cluster = member.index == -1

        # failover key
        failover = self.get_node(
            self.failover_path,
            watch=self.cluster_watcher) if self._FAILOVER in nodes else None
        failover = failover and Failover.from_node(failover[1].version,
                                                   failover[0])

        self._cluster = Cluster(initialize, config, leader,
                                last_leader_operation, members, failover)
Example #7
0
    def _load_cluster(self):
        """Reload cluster state from etcd into ``self._cluster``.

        Reads the whole cluster prefix recursively (with retries) and builds
        a Cluster from the initialize, config, optime, members, leader,
        failover and sync keys.  A missing prefix yields an empty Cluster;
        any other failure raises EtcdError.
        """
        try:
            result = self.retry(self._client.read,
                                self.client_path(''),
                                recursive=True)
            # index leaves by key relative to the cluster prefix
            nodes = {
                os.path.relpath(node.key, result.key): node
                for node in result.leaves
            }

            # get initialize flag
            initialize = nodes.get(self._INITIALIZE)
            initialize = initialize and initialize.value

            # get global dynamic configuration
            config = nodes.get(self._CONFIG)
            config = config and ClusterConfig.from_node(
                config.modifiedIndex, config.value)

            # get last leader operation
            last_leader_operation = nodes.get(self._LEADER_OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(
                last_leader_operation.value)

            # get list of members (direct children of the members/ prefix only)
            members = [
                self.member(n) for k, n in nodes.items()
                if k.startswith(self._MEMBERS) and k.count('/') == 1
            ]

            # get leader
            leader = nodes.get(self._LEADER)
            if leader:
                # fall back to a synthetic Member when the leader has no member key
                member = Member(-1, leader.value, None, {})
                member = ([m for m in members if m.name == leader.value]
                          or [member])[0]
                # use the freshest index available for the leader
                index = result.etcd_index if result.etcd_index > leader.modifiedIndex else leader.modifiedIndex + 1
                leader = Leader(index, leader.ttl, member)

            # failover key
            failover = nodes.get(self._FAILOVER)
            if failover:
                failover = Failover.from_node(failover.modifiedIndex,
                                              failover.value)

            # get synchronization state
            sync = nodes.get(self._SYNC)
            sync = SyncState.from_node(sync and sync.modifiedIndex, sync
                                       and sync.value)

            self._cluster = Cluster(initialize, config, leader,
                                    last_leader_operation, members, failover,
                                    sync)
        except etcd.EtcdKeyNotFound:
            self._cluster = Cluster(None, None, None, None, [], None, None)
        except Exception:
            # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # propagate instead of being reported as an etcd failure
            logger.exception('get_cluster')
            raise EtcdError('Etcd is not responding properly')
Example #8
0
 def setUp(self):
     """Create a data directory and a Postgresql instance plus test members."""
     self.data_dir = 'data/test0'
     self.config_dir = self.data_dir
     if not os.path.exists(self.data_dir):
         os.makedirs(self.data_dir)
     self.p = Postgresql({
         'name': 'test0',
         'scope': 'batman',
         'data_dir': self.data_dir,
         'config_dir': self.config_dir,
         'retry_timeout': 10,
         'pgpass': '******',
         'listen': '127.0.0.2, 127.0.0.3:5432',
         'connect_address': '127.0.0.2:5432',
         'authentication': {
             'superuser': {
                 'username': '******',
                 'password': '******'
             },
             'replication': {
                 'username': '******',
                 'password': '******'
             }
         },
         'remove_data_directory_on_rewind_failure': True,
         'use_pg_rewind': True,
         # NOTE(review): non-numeric on purpose, presumably to exercise
         # invalid-config handling — confirm against the tests using self.p
         'pg_ctl_timeout': 'bla',
         'parameters': self._PARAMETERS,
         'recovery_conf': {
             'foo': 'bar'
         },
         'pg_hba': ['host all all 0.0.0.0/0 md5'],
         'callbacks': {
             'on_start': 'true',
             'on_stop': 'true',
             'on_reload': 'true',
             'on_restart': 'true',
             'on_role_change': 'true'
         }
     })
     # avoid running real callback subprocesses during tests
     self.p._callback_executor = Mock()
     self.leadermem = Member(0, 'leader', 28, {
         'conn_url':
         'postgres://*****:*****@127.0.0.1:5435/postgres'
     })
     self.leader = Leader(-1, 28, self.leadermem)
     self.other = Member(
         0, 'test-1', 28, {
             'conn_url':
             'postgres://*****:*****@127.0.0.1:5433/postgres',
             'tags': {
                 'replicatefrom': 'leader'
             }
         })
     self.me = Member(0, 'test0', 28, {
         'conn_url':
         'postgres://*****:*****@127.0.0.1:5434/postgres'
     })
Example #9
0
    def _load_cluster(self):
        """Read the whole cluster prefix from the Raft store and return a Cluster."""
        prefix = self.client_path('')
        response = self._sync_obj.get(prefix, recursive=True)
        if not response:
            return Cluster(None, None, None, None, [], None, None, None)

        # key names relative to the prefix, normalized to forward slashes
        kv = {}
        for key, value in response.items():
            kv[os.path.relpath(key, prefix).replace('\\', '/')] = value

        # initialize flag
        initialize = kv.get(self._INITIALIZE)
        initialize = initialize and initialize['value']

        # global dynamic configuration
        config = kv.get(self._CONFIG)
        config = config and ClusterConfig.from_node(config['index'],
                                                    config['value'])

        # timeline history
        history = kv.get(self._HISTORY)
        history = history and TimelineHistory.from_node(history['index'],
                                                        history['value'])

        # last leader operation
        optime = kv.get(self._LEADER_OPTIME)
        last_leader_operation = int(optime['value']) if optime is not None else 0

        # members are the direct children of the members/ prefix
        members = []
        for name, value in kv.items():
            if name.startswith(self._MEMBERS) and name.count('/') == 1:
                members.append(self.member(name, value))

        # leader; use a synthetic Member if it has no member key
        leader = kv.get(self._LEADER)
        if leader:
            matches = [m for m in members if m.name == leader['value']]
            member = matches[0] if matches else Member(-1, leader['value'],
                                                       None, {})
            leader = Leader(leader['index'], None, member)

        # failover key
        failover = kv.get(self._FAILOVER)
        failover = failover and Failover.from_node(failover['index'],
                                                   failover['value'])

        # synchronization state
        sync = kv.get(self._SYNC)
        sync = SyncState.from_node(sync and sync['index'],
                                   sync and sync['value'])

        return Cluster(initialize, config, leader, last_leader_operation,
                       members, failover, sync, history)
Example #10
0
    def setUp(self):
        """Create the data directory and a standard set of test members."""
        super(BaseTestPostgresql, self).setUp()

        if not os.path.exists(self.p.data_dir):
            os.makedirs(self.p.data_dir)

        self.leadermem = Member(0, 'leader', 28, {
            'conn_url': 'postgres://*****:*****@127.0.0.1:5435/postgres'})
        self.leader = Leader(-1, 28, self.leadermem)
        self.me = Member(0, 'test0', 28, {
            'conn_url': 'postgres://*****:*****@127.0.0.1:5434/postgres'})
        self.other = Member(0, 'test-1', 28, {
            'conn_url': 'postgres://*****:*****@127.0.0.1:5433/postgres',
            'tags': {'replicatefrom': 'leader'}})
    def _inner_load_cluster(self):
        """Reload cluster state from ZooKeeper into ``self._cluster``.

        Lists the children of the cluster prefix (installing watchers), then
        fetches each known node.  Sets ``self._fetch_cluster`` when the view
        is incomplete so a full refetch happens later.
        """
        self._fetch_cluster = False
        self.event.clear()
        nodes = set(
            self.get_children(self.client_path(''), self.cluster_watcher))
        if not nodes:
            # empty prefix: force a refetch on the next cycle
            self._fetch_cluster = True

        # get initialize flag
        initialize = (self.get_node(self.initialize_path)
                      or [None])[0] if self._INITIALIZE in nodes else None

        # get list of members
        members = self.load_members() if self._MEMBERS[:-1] in nodes else []

        # get leader; a leader node carrying our name but owned by a different
        # session is stale and must be removed
        leader = self.get_node(
            self.leader_path) if self._LEADER in nodes else None
        if leader:
            client_id = self._client.client_id
            if leader[0] == self._name and client_id is not None and client_id[
                    0] != leader[1].ephemeralOwner:
                logger.info(
                    'I am leader but not owner of the session. Removing leader node'
                )
                self._client.delete(self.leader_path)
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader has no member node
                member = Member(-1, leader[0], None, {})
                member = ([m for m in members if m.name == leader[0]]
                          or [member])[0]
                leader = Leader(leader[1].version, leader[1].ephemeralOwner,
                                member)
                # a synthetic leader member means our member list is stale
                self._fetch_cluster = member.index == -1

        # failover key
        failover = self.get_node(
            self.failover_path,
            watch=self.cluster_watcher) if self._FAILOVER in nodes else None
        if failover:
            failover = Failover.from_node(failover[1].version, failover[0])

        # get last leader operation; only fetched when a refetch is pending
        optime = self.get_node(
            self.leader_optime_path
        ) if self._OPTIME in nodes and self._fetch_cluster else None
        self._last_leader_operation = 0 if optime is None else int(optime[0])
        self._cluster = Cluster(initialize, leader,
                                self._last_leader_operation, members, failover)
Example #12
0
    def _inner_load_cluster(self):
        """Reload cluster state from ZooKeeper and return a Cluster.

        Lists the children of the cluster prefix (installing watchers), then
        fetches each known node.  Sets ``self._fetch_cluster`` when the view
        is incomplete so a full refetch happens later.
        """
        self._fetch_cluster = False
        self.event.clear()
        nodes = set(self.get_children(self.client_path(''), self.cluster_watcher))
        if not nodes:
            # empty prefix: force a refetch on the next cycle
            self._fetch_cluster = True

        # get initialize flag
        initialize = (self.get_node(self.initialize_path) or [None])[0] if self._INITIALIZE in nodes else None

        # get global dynamic configuration
        config = self.get_node(self.config_path, watch=self.cluster_watcher) if self._CONFIG in nodes else None
        config = config and ClusterConfig.from_node(config[1].version, config[0], config[1].mzxid)

        # get timeline history
        history = self.get_node(self.history_path, watch=self.cluster_watcher) if self._HISTORY in nodes else None
        history = history and TimelineHistory.from_node(history[1].mzxid, history[0])

        # get synchronization state
        sync = self.get_node(self.sync_path, watch=self.cluster_watcher) if self._SYNC in nodes else None
        sync = SyncState.from_node(sync and sync[1].version, sync and sync[0])

        # get list of members; pass our sync standbys only when we lead the sync state
        sync_standby = sync.leader == self._name and sync.members or []
        members = self.load_members(sync_standby) if self._MEMBERS[:-1] in nodes else []

        # get leader; a leader node carrying our name but owned by a different
        # session is stale and must be removed
        leader = self.get_node(self.leader_path) if self._LEADER in nodes else None
        if leader:
            client_id = self._client.client_id
            if not self._ctl and leader[0] == self._name and client_id is not None \
                    and client_id[0] != leader[1].ephemeralOwner:
                logger.info('I am leader but not owner of the session. Removing leader node')
                self._client.delete(self.leader_path)
                leader = None

            if leader:
                # fall back to a synthetic Member when the leader has no member node
                member = Member(-1, leader[0], None, {})
                member = ([m for m in members if m.name == leader[0]] or [member])[0]
                leader = Leader(leader[1].version, leader[1].ephemeralOwner, member)
                # a synthetic leader member means our member list is stale
                self._fetch_cluster = member.index == -1

        # get last leader operation
        last_leader_operation = self._OPTIME in nodes and self.get_leader_optime(leader)

        # failover key
        failover = self.get_node(self.failover_path, watch=self.cluster_watcher) if self._FAILOVER in nodes else None
        failover = failover and Failover.from_node(failover[1].version, failover[0])

        return Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history)
Example #13
0
 def test_follow(self, mock_pg_rewind):
     """Exercise Postgresql.follow with and without a leader, rewind and recovery."""
     self.p.follow(None, None)
     self.p.follow(self.leader, self.leader)
     self.p.follow(Leader(-1, 28, self.other), self.leader)
     # substitute a mocked rewind for the remaining calls
     self.p.rewind = mock_pg_rewind
     self.p.follow(self.leader, self.leader)
     with mock.patch('os.path.islink', MagicMock(return_value=True)):
         with mock.patch('patroni.postgresql.Postgresql.can_rewind', new_callable=PropertyMock(return_value=True)):
             with mock.patch('os.unlink', MagicMock(return_value=True)):
                 self.p.follow(self.leader, self.leader, recovery=True)
     with mock.patch('patroni.postgresql.Postgresql.can_rewind', new_callable=PropertyMock(return_value=True)):
         # cover both rewind outcomes during recovery
         self.p.rewind.return_value = True
         self.p.follow(self.leader, self.leader, recovery=True)
         self.p.rewind.return_value = False
         self.p.follow(self.leader, self.leader, recovery=True)
     with mock.patch('patroni.postgresql.Postgresql.check_recovery_conf', MagicMock(return_value=True)):
         self.assertTrue(self.p.follow(None, None))
 def test_follow_the_leader(self, mock_pg_rewind):
     """Exercise Postgresql.follow_the_leader with and without a leader, rewind and recovery."""
     self.p.demote()
     self.p.follow_the_leader(None)
     self.p.demote()
     self.p.follow_the_leader(self.leader)
     self.p.follow_the_leader(Leader(-1, 28, self.other))
     # substitute a mocked rewind for the remaining calls
     self.p.rewind = mock_pg_rewind
     self.p.follow_the_leader(self.leader)
     self.p.require_rewind()
     with mock.patch('os.path.islink', MagicMock(return_value=True)):
         with mock.patch('patroni.postgresql.Postgresql.can_rewind', new_callable=PropertyMock(return_value=True)):
             with mock.patch('os.unlink', MagicMock(return_value=True)):
                 self.p.follow_the_leader(self.leader, recovery=True)
     self.p.require_rewind()
     with mock.patch('patroni.postgresql.Postgresql.can_rewind', new_callable=PropertyMock(return_value=True)):
         # cover both rewind outcomes during recovery
         self.p.rewind.return_value = True
         self.p.follow_the_leader(self.leader, recovery=True)
         self.p.rewind.return_value = False
         self.p.follow_the_leader(self.leader, recovery=True)
Example #15
0
 def setUp(self):
     """Create a data directory and a Postgresql instance plus test members."""
     self.data_dir = 'data/test0'
     if not os.path.exists(self.data_dir):
         os.makedirs(self.data_dir)
     self.p = Postgresql({'name': 'test0', 'scope': 'batman', 'data_dir': self.data_dir, 'retry_timeout': 10,
                          'listen': '127.0.0.1, *:5432', 'connect_address': '127.0.0.2:5432',
                          'authentication': {'superuser': {'username': '******', 'password': '******'},
                                             'replication': {'username': '******', 'password': '******'}},
                          'use_pg_rewind': True,
                          'parameters': self._PARAMETERS,
                          'recovery_conf': {'foo': 'bar'},
                          'callbacks': {'on_start': 'true', 'on_stop': 'true',
                                        'on_restart': 'true', 'on_role_change': 'true',
                                        'on_reload': 'true'
                                        },
                          'restore': 'true'})
     self.leadermem = Member(0, 'leader', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5435/postgres'})
     self.leader = Leader(-1, 28, self.leadermem)
     self.other = Member(0, 'test1', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5433/postgres',
                         'tags': {'replicatefrom': 'leader'}})
     self.me = Member(0, 'test0', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5434/postgres'})
 def setUp(self):
     """Create a Postgresql instance, its data directory and test members."""
     self.p = Postgresql({'name': 'test0', 'scope': 'batman', 'data_dir': 'data/test0',
                          'listen': '127.0.0.1, *:5432', 'connect_address': '127.0.0.2:5432',
                          'pg_hba': ['hostssl all all 0.0.0.0/0 md5', 'host all all 0.0.0.0/0 md5'],
                          'superuser': {'password': '******'},
                          'admin': {'username': '******', 'password': '******'},
                          'pg_rewind': {'username': '******', 'password': '******'},
                          'replication': {'username': '******',
                                          'password': '******',
                                          'network': '127.0.0.1/32'},
                          'parameters': {'foo': 'bar'}, 'recovery_conf': {'foo': 'bar'},
                          'callbacks': {'on_start': 'true', 'on_stop': 'true',
                                        'on_restart': 'true', 'on_role_change': 'true',
                                        'on_reload': 'true'
                                        },
                          'restore': 'true'})
     if not os.path.exists(self.p.data_dir):
         os.makedirs(self.p.data_dir)
     self.leadermem = Member(0, 'leader', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5435/postgres'})
     self.leader = Leader(-1, 28, self.leadermem)
     self.other = Member(0, 'test1', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5433/postgres'})
     self.me = Member(0, 'test0', 28, {'conn_url': 'postgres://*****:*****@127.0.0.1:5434/postgres'})
Example #17
0
    def _load_cluster(self):
        """Reload cluster state from the Kubernetes API into ``self._cluster``.

        Members come from pods matching the label selector; initialize flag,
        dynamic config, history, leader and optime come from annotations on
        the config and leader objects.  Raises KubernetesError on failure.
        """
        try:
            # get list of members
            response = self.retry(self._api.list_namespaced_pod,
                                  self._namespace,
                                  label_selector=self._label_selector)
            members = [self.member(pod) for pod in response.items]

            # NOTE(review): list_namespaced_kind is presumably a project wrapper
            # over ConfigMaps/Endpoints — confirm against the client class
            response = self.retry(self._api.list_namespaced_kind,
                                  self._namespace,
                                  label_selector=self._label_selector)
            nodes = {item.metadata.name: item for item in response.items}

            config = nodes.get(self.config_path)
            metadata = config and config.metadata
            annotations = metadata and metadata.annotations or {}

            # get initialize flag
            initialize = annotations.get(self._INITIALIZE)

            # get global dynamic configuration
            config = ClusterConfig.from_node(
                metadata and metadata.resource_version,
                annotations.get(self._CONFIG) or '{}')

            # get timeline history
            history = TimelineHistory.from_node(
                metadata and metadata.resource_version,
                annotations.get(self._HISTORY) or '[]')

            leader = nodes.get(self.leader_path)
            metadata = leader and leader.metadata
            # remember what we observed for later compare-and-swap updates
            self._leader_resource_version = metadata.resource_version if metadata else None
            self._leader_observed_subsets = leader.subsets if self.__subsets and leader else []
            annotations = metadata and metadata.annotations or {}

            # get last leader operation
            last_leader_operation = annotations.get(self._OPTIME)
            last_leader_operation = 0 if last_leader_operation is None else int(
                last_leader_operation)

            # get leader record from the leader object's annotations
            leader_record = {
                n: annotations.get(n)
                for n in (self._LEADER, 'acquireTime', 'ttl', 'renewTime',
                          'transitions') if n in annotations
            }
            # restart the observation clock whenever the record changes
            if (leader_record or self._leader_observed_record
                ) and leader_record != self._leader_observed_record:
                self._leader_observed_record = leader_record
                self._leader_observed_time = time.time()

            leader = leader_record.get(self._LEADER)
            try:
                # a zero or unparsable ttl annotation falls back to our own ttl
                ttl = int(leader_record.get('ttl')) or self._ttl
            except (TypeError, ValueError):
                ttl = self._ttl

            # the leader is considered expired once unobserved for longer than ttl
            if not metadata or not self._leader_observed_time or self._leader_observed_time + ttl < time.time(
            ):
                leader = None

            if metadata:
                # fall back to a synthetic Member when the leader has no pod
                member = Member(-1, leader, None, {})
                member = ([m for m in members if m.name == leader]
                          or [member])[0]
                leader = Leader(response.metadata.resource_version, None,
                                member)

            # failover key
            failover = nodes.get(self.failover_path)
            metadata = failover and failover.metadata
            failover = Failover.from_node(
                metadata and metadata.resource_version, metadata
                and metadata.annotations)

            # get synchronization state
            sync = nodes.get(self.sync_path)
            metadata = sync and sync.metadata
            sync = SyncState.from_node(metadata and metadata.resource_version,
                                       metadata and metadata.annotations)

            self._cluster = Cluster(initialize, config, leader,
                                    last_leader_operation, members, failover,
                                    sync, history)
        except Exception:
            logger.exception('get_cluster')
            raise KubernetesError('Kubernetes API is not responding properly')