def setUp(self):
    """Build a Ha object wired to a real Postgresql wrapper and a mocked Etcd.

    Client.machines is patched so no network traffic happens while the Etcd
    object is constructed.
    """
    with patch.object(Client, 'machines') as mock_machines:
        mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
        # Minimal postgresql0 configuration; credentials are irrelevant here.
        config = {
            'name': 'postgresql0',
            'scope': 'dummy',
            'listen': '127.0.0.1:5432',
            'data_dir': 'data/postgresql0',
            'superuser': {},
            'admin': {},
            'replication': {'username': '', 'password': '', 'network': ''},
        }
        self.p = Postgresql(config)
        self.p.set_state('running')
        self.p.set_role('replica')
        # 'true' is the module-level Mock returning True used across this file.
        self.p.check_replication_lag = true
        self.p.can_create_replica_without_replication_connection = MagicMock(return_value=False)
        self.e = Etcd('foo', {'ttl': 30, 'host': 'ok:2379', 'scope': 'test'})
        self.ha = Ha(MockPatroni(self.p, self.e))
        self.ha._async_executor.run_async = run_async
        self.ha.old_cluster = self.e.get_cluster()
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.ha.load_cluster_from_dcs = Mock()
def setUp(self, mock_machines):
    """Wire a Ha instance to MockPostgresql and an Etcd whose delete always fails.

    ``mock_machines`` is injected by a ``@patch.object(Client, 'machines')``
    decorator (not visible in this chunk).
    """
    mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
    self.p = MockPostgresql()
    self.e = Etcd('foo', {'ttl': 30, 'host': 'ok:2379', 'scope': 'test'})
    # Stub the network-facing client calls; delete raises so the DCS
    # error-handling paths get exercised.
    self.e.client.read = etcd_read
    self.e.client.write = etcd_write
    self.e.client.delete = Mock(side_effect=etcd.EtcdException())
    self.ha = Ha(MockPatroni(self.p, self.e))
    self.ha._async_executor.run_async = run_async
    self.ha.old_cluster = self.e.get_cluster()
    self.ha.cluster = get_cluster_not_initialized_without_leader()
    self.ha.load_cluster_from_dcs = Mock()
def setUp(self):
    """Create the Etcd handle under test with Client.machines mocked out."""
    with patch.object(Client, 'machines') as mock_machines:
        mock_machines.__get__ = Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])
        self.etcd = Etcd('foo', {'namespace': '/patroni/', 'ttl': 30,
                                 'host': 'localhost:2379', 'scope': 'test'})
def setUp(self):
    """Create the Etcd handle and replace its client I/O with canned handlers."""
    machines = ['http://localhost:2379', 'http://localhost:4001']
    with patch.object(Client, 'machines') as mock_machines:
        mock_machines.__get__ = Mock(return_value=machines)
        self.etcd = Etcd('foo', {'namespace': '/patroni/', 'ttl': 30,
                                 'host': 'localhost:2379', 'scope': 'test'})
    # delete always raises so the error paths are exercised.
    self.etcd.client.write = etcd_write
    self.etcd.client.read = etcd_read
    self.etcd.client.delete = Mock(side_effect=etcd.EtcdException())
def setUp(self):
    """Build the Etcd object with mocked machines and stubbed read/write/delete."""
    with patch.object(Client, 'machines') as mock_machines:
        mock_machines.__get__ = Mock(
            return_value=['http://localhost:2379', 'http://localhost:4001'])
        self.etcd = Etcd('foo', {'namespace': '/patroni/', 'ttl': 30,
                                 'host': 'localhost:2379', 'scope': 'test'})
        self.etcd.client.write = etcd_write
        self.etcd.client.read = etcd_read
        # delete is made to fail so tests can cover the exception branches.
        self.etcd.client.delete = Mock(side_effect=etcd.EtcdException())
def set_up(self):
    """Fixture body: neuter real sleeping, then build the mocked Etcd handle."""
    # Replace time.sleep globally so retry loops do not stall the test run.
    time.sleep = time_sleep
    with patch.object(Client, 'machines') as mock_machines:
        mock_machines.__get__ = Mock(
            return_value=['http://localhost:2379', 'http://localhost:4001'])
        self.etcd = Etcd('foo', {'ttl': 30, 'host': 'localhost:2379', 'scope': 'test'})
        self.etcd.client.write = etcd_write
        self.etcd.client.read = etcd_read
def get_dcs(name, config):
    """Instantiate the distributed configuration store selected by *config*.

    Checks for an 'etcd' section first, then 'zookeeper'; raises when
    neither backend is configured.
    """
    if 'etcd' in config:
        return Etcd(name, config['etcd'])
    elif 'zookeeper' in config:
        return ZooKeeper(name, config['zookeeper'])
    raise Exception('Can not find suitable configuration of distributed configuration store')
def setUp(self):
    """Prepare a click CliRunner and a mocked Etcd object for ctl tests."""
    self.runner = CliRunner()
    with patch.object(Client, 'machines') as mock_machines:
        mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
        self.e = Etcd('foo', {'ttl': 30, 'host': 'ok:2379', 'scope': 'test'})
def setUp(self, mock_machines):
    """Wire Ha to MockPostgresql and an Etcd with stubbed read/write.

    ``mock_machines`` comes from a ``@patch.object(Client, 'machines')``
    decorator outside this chunk.
    """
    mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
    self.p = MockPostgresql()
    self.e = Etcd('foo', {'ttl': 30, 'host': 'ok:2379', 'scope': 'test'})
    self.e.client.read = etcd_read
    self.e.client.write = etcd_write
    self.ha = Ha(MockPatroni(self.p, self.e))
    self.ha._async_executor.run_async = run_async
    self.ha.old_cluster = self.e.get_cluster()
    self.ha.cluster = get_cluster_not_initialized_without_leader()
    self.ha.load_cluster_from_dcs = Mock()
def setUp(self, mock_machines):
    """Build the Ha fixture: mocked Postgresql, Etcd with failing delete."""
    mock_machines.__get__ = Mock(return_value=["http://remotehost:2379"])
    self.p = MockPostgresql()
    self.e = Etcd("foo", {"ttl": 30, "host": "ok:2379", "scope": "test"})
    # Canned handlers replace the real etcd client; delete raises to cover
    # the failure branches.
    self.e.client.read = etcd_read
    self.e.client.write = etcd_write
    self.e.client.delete = Mock(side_effect=etcd.EtcdException())
    self.ha = Ha(MockPatroni(self.p, self.e))
    self.ha._async_executor.run_async = run_async
    self.ha.old_cluster = self.e.get_cluster()
    self.ha.cluster = get_cluster_not_initialized_without_leader()
    self.ha.load_cluster_from_dcs = Mock()
def get_dcs(name, config):
    """Pick and build the DCS backend named in *config*.

    Supports etcd, zookeeper and consul; imports are deferred so only the
    selected backend's dependencies are required at runtime. Raises
    PatroniException when no known backend section is present.
    """
    if 'etcd' in config:
        from patroni.etcd import Etcd
        return Etcd(name, config['etcd'])
    if 'zookeeper' in config:
        from patroni.zookeeper import ZooKeeper
        return ZooKeeper(name, config['zookeeper'])
    if 'consul' in config:
        from patroni.consul import Consul
        return Consul(name, config['consul'])
    raise PatroniException('Can not find suitable configuration of distributed configuration store')
def setUp(self):
    """Set up Ha around a real Postgresql object; etcd machine list is mocked."""
    with patch.object(Client, 'machines') as mock_machines:
        mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
        self.p = Postgresql({'name': 'postgresql0', 'scope': 'dummy',
                             'listen': '127.0.0.1:5432', 'data_dir': 'data/postgresql0',
                             'superuser': {}, 'admin': {},
                             'replication': {'username': '', 'password': '', 'network': ''}})
        self.p.set_state('running')
        self.p.set_role('replica')
        # 'true' is the module-level Mock returning True used across this file.
        self.p.check_replication_lag = true
        self.p.can_create_replica_without_replication_connection = MagicMock(return_value=False)
        self.e = Etcd('foo', {'ttl': 30, 'host': 'ok:2379', 'scope': 'test'})
        self.ha = Ha(MockPatroni(self.p, self.e))
        self.ha._async_executor.run_async = run_async
        self.ha.old_cluster = self.e.get_cluster()
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.ha.load_cluster_from_dcs = Mock()
class TestEtcd(unittest.TestCase):
    """Unit tests for the Etcd DCS adapter; all client I/O is stubbed out."""

    def __init__(self, method_name='runTest'):
        # Route unittest's setUp to set_up so the fixture below is used.
        self.setUp = self.set_up
        super(TestEtcd, self).__init__(method_name)

    def set_up(self):
        # Neuter real sleeping (retry loops would otherwise stall the run),
        # then build an Etcd handle whose machine list is mocked and whose
        # network-facing client calls are replaced with canned handlers.
        time.sleep = time_sleep
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])
            self.etcd = Etcd('foo', {'ttl': 30, 'host': 'localhost:2379', 'scope': 'test'})
            self.etcd.client.write = etcd_write
            self.etcd.client.read = etcd_read

    def test_get_etcd_client(self):
        # With machines raising EtcdException, the client constructor retries
        # via time.sleep; the sleep stub raises SleepException to break out.
        time.sleep = time_sleep_exception
        with patch.object(etcd.Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(side_effect=etcd.EtcdException)
            self.assertRaises(SleepException, self.etcd.get_etcd_client,
                              {'discovery_srv': 'test'})

    def test_get_cluster(self):
        self.assertIsInstance(self.etcd.get_cluster(), Cluster)
        # An unknown base path still yields a Cluster, just without a leader.
        self.etcd._base_path = '/service/nocluster'
        cluster = self.etcd.get_cluster()
        self.assertIsInstance(cluster, Cluster)
        self.assertIsNone(cluster.leader)

    def test_current_leader(self):
        self.assertIsInstance(self.etcd.current_leader(), Leader)
        self.etcd._base_path = '/service/noleader'
        self.assertIsNone(self.etcd.current_leader())

    def test_touch_member(self):
        self.assertFalse(self.etcd.touch_member('', ''))

    def test_take_leader(self):
        self.assertFalse(self.etcd.take_leader())

    # NOTE(review): name is missing an underscore after 'test' but is still
    # discovered by unittest because of the 'test' prefix.
    def testattempt_to_acquire_leader(self):
        # Both the 'key already exists' and the generic failure paths
        # must report False.
        self.etcd._base_path = '/service/exists'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())
        self.etcd._base_path = '/service/failed'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())

    def test_update_leader(self):
        self.assertTrue(self.etcd.update_leader(MockPostgresql()))

    def test_initialize(self):
        self.assertFalse(self.etcd.initialize())

    # NOTE(review): 'initializion' is a typo for 'initialization' in the
    # test name only; the method under test is spelled correctly.
    def test_cancel_initializion(self):
        self.etcd.client.delete = etcd_delete
        self.assertFalse(self.etcd.cancel_initialization())

    def test_delete_leader(self):
        self.etcd.client.delete = etcd_delete
        self.assertFalse(self.etcd.delete_leader())

    def test_watch(self):
        # Exercise watch() with several timeout values; etcd_watch presumably
        # keys its behavior off the timeout — confirm against the stub.
        self.etcd.client.watch = etcd_watch
        self.etcd.watch(100)
        self.etcd.get_cluster()
        self.etcd.watch(1.5)
        self.etcd.watch(4.5)
        self.etcd.watch(9.5)
        self.etcd.watch(100)
class TestCtl(unittest.TestCase): @patch('socket.getaddrinfo', socket_getaddrinfo) @patch.object(Client, 'machines') def setUp(self, mock_machines): mock_machines.__get__ = Mock(return_value=['http://*****:*****@patch('psycopg2.connect', psycopg2_connect) def test_get_cursor(self): c = get_cursor(get_cluster_initialized_without_leader(), role='master') assert c is None c = get_cursor(get_cluster_initialized_with_leader(), role='master') assert c is not None c = get_cursor(get_cluster_initialized_with_leader(), role='replica') # # MockCursor returns pg_is_in_recovery as false assert c is None c = get_cursor(get_cluster_initialized_with_leader(), role='any') assert c is not None def test_output_members(self): cluster = get_cluster_initialized_with_leader() output_members(cluster, name='abc', format='pretty') output_members(cluster, name='abc', format='json') output_members(cluster, name='abc', format='tsv') @patch('patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())) @patch('patroni.etcd.Etcd.get_etcd_client', Mock(return_value=None)) @patch('patroni.etcd.Etcd.set_failover_value', Mock(return_value=None)) @patch('patroni.ctl.wait_for_leader', Mock(return_value=get_cluster_initialized_with_leader())) @patch('requests.get', requests_get) @patch('requests.post', requests_get) @patch('patroni.ctl.post_patroni', Mock(return_value=MockResponse())) def test_failover(self): runner = CliRunner() with patch('patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())): result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader other y''') assert 'Failing over to new leader' in result.output result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader other N''') assert 'Aborting failover' in str(result.exception) result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader leader y''') assert 'target and source are the same' in 
str(result.exception) result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader Reality y''') assert 'Reality does not exist' in str(result.exception) result = runner.invoke(ctl, ['failover', 'dummy', '--force']) assert 'Failing over to new leader' in result.output result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='dummy') assert 'is not the leader of cluster' in str(result.exception) with patch( 'patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_with_only_leader())): result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader other y''') assert 'No candidates found to failover to' in str( result.exception) with patch( 'patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_without_leader())): result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader other y''') assert 'This cluster has no master' in str(result.exception) with patch('patroni.ctl.post_patroni', Mock(side_effect=Exception())): result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader other y''') assert 'falling back to DCS' in result.output assert 'Failover failed' in result.output mocked = Mock() mocked.return_value.status_code = 500 with patch('patroni.ctl.post_patroni', Mock(return_value=mocked)): result = runner.invoke(ctl, ['failover', 'dummy', '--dcs', '8.8.8.8'], input='''leader other y''') assert 'Failover failed, details' in result.output # with patch('patroni.dcs.AbstractDCS.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())): # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8'], input='nonsense') # assert 'is not the leader of cluster' in str(result.exception) # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8', '--master', 'nonsense']) # assert 'is not the leader of cluster' in str(result.exception) # result = runner.invoke(ctl, ['failover', 'alpha', 
'--dcs', '8.8.8.8'], input='leader\nother\nn') # assert 'Aborting failover' in str(result.exception) # with patch('patroni.ctl.wait_for_leader', Mock(return_value = get_cluster_initialized_with_leader())): # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8'], input='leader\nother\nY') # assert 'master did not change after' in result.output # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8'], input='leader\nother\nY') # assert 'Failover failed' in result.output def test_(self): self.assertRaises(patroni.exceptions.PatroniCtlException, get_dcs, {'scheme': 'dummy'}, 'dummy') @patch('psycopg2.connect', psycopg2_connect) @patch('patroni.ctl.query_member', Mock(return_value=([['mock column']], None))) def test_query(self): runner = CliRunner() with patch('patroni.ctl.get_dcs', Mock(return_value=self.e)): result = runner.invoke(ctl, [ 'query', 'alpha', '--member', 'abc', '--role', 'master', ]) assert 'mutually exclusive' in str(result.exception) with runner.isolated_filesystem(): dummy_file = open('dummy', 'w') dummy_file.write('SELECT 1') dummy_file.close() result = runner.invoke(ctl, [ 'query', 'alpha', '--file', 'dummy', '--command', 'dummy', ]) assert 'mutually exclusive' in str(result.exception) result = runner.invoke(ctl, ['query', 'alpha', '--file', 'dummy']) os.remove('dummy') result = runner.invoke(ctl, ['query', 'alpha', '--command', 'SELECT 1']) assert 'mock column' in result.output @patch('patroni.ctl.get_cursor', Mock(return_value=MockConnect().cursor())) def test_query_member(self): rows = query_member(None, None, None, 'master', 'SELECT pg_is_in_recovery()') assert 'False' in str(rows) rows = query_member(None, None, None, 'replica', 'SELECT pg_is_in_recovery()') assert rows == (None, None) with patch('patroni.ctl.get_cursor', Mock(return_value=None)): rows = query_member(None, None, None, None, 'SELECT pg_is_in_recovery()') assert 'No connection to' in str(rows) rows = query_member(None, None, None, 'replica', 'SELECT 
pg_is_in_recovery()') assert 'No connection to' in str(rows) with patch('patroni.ctl.get_cursor', Mock(side_effect=psycopg2.OperationalError('bla'))): rows = query_member(None, None, None, 'replica', 'SELECT pg_is_in_recovery()') with patch('test_postgresql.MockCursor.execute', Mock(side_effect=psycopg2.OperationalError('bla'))): rows = query_member(None, None, None, 'replica', 'SELECT pg_is_in_recovery()') @patch('patroni.dcs.AbstractDCS.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())) def test_dsn(self): runner = CliRunner() with patch('patroni.ctl.get_dcs', Mock(return_value=self.e)): result = runner.invoke(ctl, ['dsn', 'alpha', '--dcs', '8.8.8.8']) assert 'host=127.0.0.1 port=5435' in result.output result = runner.invoke(ctl, [ 'dsn', 'alpha', '--role', 'master', '--member', 'dummy', ]) assert 'mutually exclusive' in str(result.exception) result = runner.invoke(ctl, ['dsn', 'alpha', '--member', 'dummy']) assert 'Can not find' in str(result.exception) # result = runner.invoke(ctl, ['dsn', 'alpha', '--dcs', '8.8.8.8', '--role', 'replica']) # assert 'host=127.0.0.1 port=5436' in result.output @patch('patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())) @patch('patroni.etcd.Etcd.get_etcd_client', Mock(return_value=None)) @patch('requests.get', requests_get) @patch('requests.post', requests_get) def test_restart_reinit(self): runner = CliRunner() result = runner.invoke(ctl, ['restart', 'alpha', '--dcs', '8.8.8.8'], input='y') result = runner.invoke(ctl, ['reinit', 'alpha', '--dcs', '8.8.8.8'], input='y') result = runner.invoke(ctl, ['restart', 'alpha', '--dcs', '8.8.8.8'], input='N') result = runner.invoke(ctl, [ 'restart', 'alpha', '--dcs', '8.8.8.8', 'dummy', '--any', ], input='y') assert 'not a member' in str(result.exception) with patch('requests.post', Mock(return_value=MockResponse())): result = runner.invoke(ctl, ['restart', 'alpha', '--dcs', '8.8.8.8'], input='y') 
@patch('patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())) @patch('patroni.etcd.Etcd.get_etcd_client', Mock(return_value=None)) def test_remove(self): runner = CliRunner() result = runner.invoke(ctl, ['remove', 'alpha', '--dcs', '8.8.8.8'], input='alpha\nslave') assert 'Please confirm' in result.output assert 'You are about to remove all' in result.output assert 'You did not exactly type' in str(result.exception) result = runner.invoke(ctl, ['remove', 'alpha', '--dcs', '8.8.8.8'], input='''alpha Yes I am aware slave''') assert 'You did not specify the current master of the cluster' in str( result.exception) result = runner.invoke(ctl, ['remove', 'alpha', '--dcs', '8.8.8.8'], input='beta\nleader') assert 'Cluster names specified do not match' in str(result.exception) with patch('patroni.etcd.Etcd.get_cluster', get_cluster_initialized_with_leader): result = runner.invoke(ctl, ['remove', 'alpha', '--dcs', '8.8.8.8'], input='''alpha Yes I am aware leader''') assert 'object has no attribute' in str(result.exception) with patch('patroni.ctl.get_dcs', Mock(return_value=Mock())): result = runner.invoke(ctl, ['remove', 'alpha', '--dcs', '8.8.8.8'], input='''alpha Yes I am aware leader''') assert 'We have not implemented this for DCS of type' in str( result.exception) @patch('patroni.etcd.Etcd.watch', Mock(return_value=None)) @patch('patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())) def test_wait_for_leader(self): dcs = self.e self.assertRaises(patroni.exceptions.PatroniCtlException, wait_for_leader, dcs, 0) cluster = wait_for_leader(dcs=dcs, timeout=2) assert cluster.leader.member.name == 'leader' def test_post_patroni(self): member = get_cluster_initialized_with_leader().leader.member self.assertRaises(requests.exceptions.ConnectionError, post_patroni, member, 'dummy', {}) def test_ctl(self): runner = CliRunner() runner.invoke(ctl, ['list']) result = runner.invoke(ctl, ['--help']) assert 
'Usage:' in result.output def test_get_any_member(self): m = get_any_member(get_cluster_initialized_without_leader(), role='master') assert m is None m = get_any_member(get_cluster_initialized_with_leader(), role='master') assert m.name == 'leader' def test_get_all_members(self): r = list( get_all_members(get_cluster_initialized_without_leader(), role='master')) assert len(r) == 0 r = list( get_all_members(get_cluster_initialized_with_leader(), role='master')) assert len(r) == 1 assert r[0].name == 'leader' r = list( get_all_members(get_cluster_initialized_with_leader(), role='replica')) assert len(r) == 1 assert r[0].name == 'other' r = list( get_all_members(get_cluster_initialized_without_leader(), role='replica')) assert len(r) == 2 @patch('patroni.etcd.Etcd.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())) @patch('patroni.etcd.Etcd.get_etcd_client', Mock(return_value=None)) @patch('requests.get', requests_get) @patch('requests.post', requests_get) def test_members(self): runner = CliRunner() result = runner.invoke(members, ['alpha']) assert result.exit_code == 0 def test_configure(self): runner = CliRunner() result = runner.invoke(configure, [ '--dcs', 'abc', '-c', 'dummy', '-n', 'bla', ]) assert result.exit_code == 0
class TestHa(unittest.TestCase): @patch('socket.getaddrinfo', socket_getaddrinfo) @patch.object(Client, 'machines') def setUp(self, mock_machines): mock_machines.__get__ = Mock(return_value=['http://*****:*****@patch('sys.exit', return_value=1) @patch('patroni.ha.Ha.sysid_valid', MagicMock(return_value=True)) def test_sysid_no_match(self, exit_mock): self.ha.run_cycle() exit_mock.assert_called_once_with(1) @patch.object(Cluster, 'is_unlocked', Mock(return_value=False)) def test_start_as_readonly(self): self.p.is_leader = self.p.is_healthy = false self.ha.has_lock = true self.assertEquals( self.ha.run_cycle(), 'promoted self to leader because i had the session lock') def test_acquire_lock_as_master(self): self.assertEquals(self.ha.run_cycle(), 'acquired session lock as a leader') def test_promoted_by_acquiring_lock(self): self.ha.is_healthiest_node = true self.p.is_leader = false self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') def test_demote_after_failing_to_obtain_lock(self): self.ha.acquire_lock = false self.assertEquals( self.ha.run_cycle(), 'demoted self due after trying and failing to obtain lock') def test_follow_new_leader_after_failing_to_obtain_lock(self): self.ha.is_healthiest_node = true self.ha.acquire_lock = false self.p.is_leader = false self.assertEquals( self.ha.run_cycle(), 'following new leader after trying and failing to obtain lock') def test_demote_because_not_healthiest(self): self.ha.is_healthiest_node = false self.assertEquals( self.ha.run_cycle(), 'demoting self because i am not the healthiest node') def test_follow_new_leader_because_not_healthiest(self): self.ha.is_healthiest_node = false self.p.is_leader = false self.assertEquals( self.ha.run_cycle(), 'following a different leader because i am not the healthiest node' ) def test_promote_because_have_lock(self): self.ha.cluster.is_unlocked = false self.ha.has_lock = true self.p.is_leader = false self.assertEquals( self.ha.run_cycle(), 'promoted 
self to leader because i had the session lock') def test_leader_with_lock(self): self.ha.cluster.is_unlocked = false self.ha.has_lock = true self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock') def test_demote_because_not_having_lock(self): self.ha.cluster.is_unlocked = false self.assertEquals( self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader') def test_demote_because_update_lock_failed(self): self.ha.cluster.is_unlocked = false self.ha.has_lock = true self.ha.update_lock = false self.assertEquals( self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader') def test_follow_the_leader(self): self.ha.cluster.is_unlocked = false self.p.is_leader = false self.assertEquals( self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader') def test_no_etcd_connection_master_demote(self): self.ha.load_cluster_from_dcs = Mock( side_effect=DCSError('Etcd is not responding properly')) self.assertEquals( self.ha.run_cycle(), 'demoted self because DCS is not accessible and i was a leader') def test_bootstrap_from_leader(self): self.ha.cluster = get_cluster_initialized_with_leader() self.p.bootstrap = false self.assertEquals(self.ha.bootstrap(), 'trying to bootstrap from leader') def test_bootstrap_waiting_for_leader(self): self.ha.cluster = get_cluster_initialized_without_leader() self.assertEquals(self.ha.bootstrap(), 'waiting for leader to bootstrap') def test_bootstrap_initialize_lock_failed(self): self.ha.cluster = get_cluster_not_initialized_without_leader() self.assertEquals(self.ha.bootstrap(), 'failed to acquire initialize lock') def test_bootstrap_initialized_new_cluster(self): self.ha.cluster = get_cluster_not_initialized_without_leader() self.e.initialize = true self.assertEquals(self.ha.bootstrap(), 'initialized a new cluster') def test_bootstrap_release_initialize_key_on_failure(self): self.ha.cluster = get_cluster_not_initialized_without_leader() 
self.e.initialize = true self.p.bootstrap = Mock(side_effect=PostgresException( "Could not bootstrap master PostgreSQL")) self.assertRaises(PostgresException, self.ha.bootstrap) def test_reinitialize(self): self.ha.schedule_reinitialize() self.ha.schedule_reinitialize() self.ha.run_cycle() self.assertIsNone(self.ha._async_executor.scheduled_action) self.ha.cluster = get_cluster_initialized_with_leader() self.ha.has_lock = true self.ha.schedule_reinitialize() self.ha.run_cycle() self.assertIsNone(self.ha._async_executor.scheduled_action) self.ha.has_lock = false self.ha.schedule_reinitialize() self.ha.run_cycle() def test_restart(self): self.assertEquals(self.ha.restart(), (True, 'restarted successfully')) self.p.restart = false self.assertEquals(self.ha.restart(), (False, 'restart failed')) self.ha.schedule_reinitialize() self.assertEquals(self.ha.restart(), (False, 'reinitialize already in progress')) def test_restart_in_progress(self): self.ha._async_executor.schedule('restart', True) self.assertTrue(self.ha.restart_scheduled()) self.assertEquals(self.ha.run_cycle(), 'not healthy enough for leader race') self.ha.cluster = get_cluster_initialized_with_leader() self.assertEquals(self.ha.run_cycle(), 'restart in progress') self.ha.has_lock = true self.assertEquals(self.ha.run_cycle(), 'updated leader lock during restart') self.ha.update_lock = false self.assertEquals(self.ha.run_cycle(), 'failed to update leader lock during restart') @patch('requests.get', requests_get) def test_manual_failover_from_leader(self): self.ha.has_lock = true self.ha.cluster = get_cluster_initialized_with_leader( Failover(0, 'blabla', '')) self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock') self.ha.cluster = get_cluster_initialized_with_leader( Failover(0, '', MockPostgresql.name)) self.assertEquals(self.ha.run_cycle(), 'no action. 
i am the leader with the lock') self.ha.cluster = get_cluster_initialized_with_leader( Failover(0, '', 'blabla')) self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock') f = Failover(0, MockPostgresql.name, '') self.ha.cluster = get_cluster_initialized_with_leader(f) self.assertEquals(self.ha.run_cycle(), 'manual failover: demoting myself') self.ha.fetch_node_status = lambda e: (e, True, True, 0, { 'nofailover': 'True' }) self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock') # manual failover from the previous leader to us won't happen if we hold the nofailover flag self.ha.cluster = get_cluster_initialized_with_leader( Failover(0, 'blabla', MockPostgresql.name)) self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock') @patch('requests.get', requests_get) def test_manual_failover_process_no_leader(self): self.p.is_leader = false self.ha.cluster = get_cluster_initialized_without_leader( failover=Failover(0, '', MockPostgresql.name)) self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.ha.cluster = get_cluster_initialized_without_leader( failover=Failover(0, '', 'leader')) self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') self.ha.fetch_node_status = lambda e: (e, True, True, 0, {} ) # accessible, in_recovery self.assertEquals( self.ha.run_cycle(), 'following a different leader because i am not the healthiest node' ) self.ha.cluster = get_cluster_initialized_without_leader( failover=Failover(0, MockPostgresql.name, '')) self.assertEquals( self.ha.run_cycle(), 'following a different leader because i am not the healthiest node' ) self.ha.fetch_node_status = lambda e: (e, False, True, 0, {} ) # inaccessible, in_recovery self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') # set failover flag to True for all members of the cluster # this should elect the current 
member, as we are not going to call the API for it. self.ha.cluster = get_cluster_initialized_without_leader( failover=Failover(0, '', 'other')) self.ha.fetch_node_status = lambda e: (e, True, True, 0, { 'nofailover': 'True' }) # accessible, in_recovery self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock') # same as previous, but set the current member to nofailover. In no case it should be elected as a leader self.ha.patroni.nofailover = True self.assertEquals( self.ha.run_cycle(), 'following a different leader because I am not allowed to promote') def test_is_healthiest_node(self): self.ha.state_handler.is_leader = false self.ha.patroni.nofailover = False self.ha.fetch_node_status = lambda e: (e, True, True, 0, {}) self.assertTrue(self.ha.is_healthiest_node()) def test__is_healthiest_node(self): self.assertTrue( self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.p.is_leader = false self.ha.fetch_node_status = lambda e: (e, True, True, 0, {} ) # accessible, in_recovery self.assertTrue( self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.fetch_node_status = lambda e: (e, True, False, 0, {} ) # accessible, not in_recovery self.assertFalse( self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.fetch_node_status = lambda e: (e, True, True, 1, { }) # accessible, in_recovery, xlog location ahead self.assertFalse( self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.p.check_replication_lag = false self.assertFalse( self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.patroni.nofailover = True self.assertFalse( self.ha._is_healthiest_node(self.ha.old_cluster.members)) self.ha.patroni.nofailover = False @patch('requests.get', requests_get) def test_fetch_node_status(self): member = Member(0, 'test', 1, {'api_url': 'http://127.0.0.1:8011/patroni'}) self.ha.fetch_node_status(member) member = Member(0, 'test', 1, {'api_url': 'http://localhost:8011/patroni'}) 
self.ha.fetch_node_status(member)
class TestHa(unittest.TestCase):
    """HA state-machine tests driven through a MockPostgresql backend.

    NOTE(review): this class was recovered from a mangled/partially masked
    dump.  The ``setUp`` body had been destroyed by credential masking
    ("http://*****:*****@...") and is restored from the identical clean
    fixture preserved at the top of this file; everything else is the
    original code, reformatted.  Assertion strings that the mangling split
    across physical lines have been rejoined.
    """

    @patch('socket.getaddrinfo', socket_getaddrinfo)
    @patch.object(Client, 'machines')
    def setUp(self, mock_machines):
        # Restored from the clean copy of this fixture (see file head).
        mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
        self.p = MockPostgresql()
        self.e = Etcd('foo', {'ttl': 30, 'host': 'ok:2379', 'scope': 'test'})
        self.e.client.read = etcd_read
        self.e.client.write = etcd_write
        self.e.client.delete = Mock(side_effect=etcd.EtcdException())
        self.ha = Ha(MockPatroni(self.p, self.e))
        self.ha._async_executor.run_async = run_async
        self.ha.old_cluster = self.e.get_cluster()
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.ha.load_cluster_from_dcs = Mock()

    @patch.object(Cluster, 'is_unlocked', Mock(return_value=False))
    def test_start_as_readonly(self):
        self.p.is_leader = self.p.is_healthy = false
        self.ha.has_lock = true
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')

    def test_acquire_lock_as_master(self):
        self.assertEquals(self.ha.run_cycle(), 'acquired session lock as a leader')

    def test_promoted_by_acquiring_lock(self):
        self.ha.is_healthiest_node = true
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')

    def test_demote_after_failing_to_obtain_lock(self):
        self.ha.acquire_lock = false
        self.assertEquals(self.ha.run_cycle(), 'demoted self due after trying and failing to obtain lock')

    def test_follow_new_leader_after_failing_to_obtain_lock(self):
        self.ha.is_healthiest_node = true
        self.ha.acquire_lock = false
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'following new leader after trying and failing to obtain lock')

    def test_demote_because_not_healthiest(self):
        self.ha.is_healthiest_node = false
        self.assertEquals(self.ha.run_cycle(), 'demoting self because i am not the healthiest node')

    def test_follow_new_leader_because_not_healthiest(self):
        self.ha.is_healthiest_node = false
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')

    def test_promote_because_have_lock(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')

    def test_leader_with_lock(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')

    def test_demote_because_not_having_lock(self):
        self.ha.cluster.is_unlocked = false
        self.assertEquals(self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader')

    def test_demote_because_update_lock_failed(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.ha.update_lock = false
        self.assertEquals(self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader')

    def test_follow_the_leader(self):
        self.ha.cluster.is_unlocked = false
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')

    def test_no_etcd_connection_master_demote(self):
        self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly'))
        self.assertEquals(self.ha.run_cycle(), 'demoted self because DCS is not accessible and i was a leader')

    def test_bootstrap_from_leader(self):
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.p.bootstrap = false
        self.assertEquals(self.ha.bootstrap(), 'trying to bootstrap from leader')

    def test_bootstrap_waiting_for_leader(self):
        self.ha.cluster = get_cluster_initialized_without_leader()
        self.assertEquals(self.ha.bootstrap(), 'waiting for leader to bootstrap')

    def test_bootstrap_initialize_lock_failed(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.assertEquals(self.ha.bootstrap(), 'failed to acquire initialize lock')

    def test_bootstrap_initialized_new_cluster(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.e.initialize = true
        self.assertEquals(self.ha.bootstrap(), 'initialized a new cluster')

    def test_bootstrap_release_initialize_key_on_failure(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.e.initialize = true
        self.p.bootstrap = Mock(side_effect=PostgresException("Could not bootstrap master PostgreSQL"))
        self.assertRaises(PostgresException, self.ha.bootstrap)

    def test_reinitialize(self):
        self.ha.schedule_reinitialize()
        self.ha.schedule_reinitialize()
        self.ha.run_cycle()
        self.assertIsNone(self.ha._async_executor.scheduled_action)
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.ha.has_lock = true
        self.ha.schedule_reinitialize()
        self.ha.run_cycle()
        self.assertIsNone(self.ha._async_executor.scheduled_action)
        self.ha.has_lock = false
        self.ha.schedule_reinitialize()
        self.ha.run_cycle()

    def test_restart(self):
        self.assertEquals(self.ha.restart(), (True, 'restarted successfully'))
        self.p.restart = false
        self.assertEquals(self.ha.restart(), (False, 'restart failed'))
        self.ha.schedule_reinitialize()
        self.assertEquals(self.ha.restart(), (False, 'reinitialize already in progress'))

    def test_restart_in_progress(self):
        self.ha._async_executor.schedule('restart', True)
        self.assertTrue(self.ha.restart_scheduled())
        self.assertEquals(self.ha.run_cycle(), 'not healthy enough for leader race')
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEquals(self.ha.run_cycle(), 'restart in progress')
        self.ha.has_lock = true
        self.assertEquals(self.ha.run_cycle(), 'updated leader lock during restart')
        self.ha.update_lock = false
        self.assertEquals(self.ha.run_cycle(), 'failed to update leader lock during restart')

    @patch('requests.get', requests_get)
    def test_manual_failover_from_leader(self):
        self.ha.has_lock = true
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', ''))
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', MockPostgresql.name))
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', 'blabla'))
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        f = Failover(0, MockPostgresql.name, '')
        self.ha.cluster = get_cluster_initialized_with_leader(f)
        self.assertEquals(self.ha.run_cycle(), 'manual failover: demoting myself')

    @patch('requests.get', requests_get)
    def test_manual_failover_process_no_leader(self):
        self.p.is_leader = false
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', MockPostgresql.name))
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'leader'))
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
        self.ha.fetch_node_status = lambda e: (e, True, True, 0)  # accessible, in_recovery
        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, MockPostgresql.name, ''))
        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
        self.ha.fetch_node_status = lambda e: (e, False, True, 0)  # inaccessible, in_recovery
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')

    def test__is_healthiest_node(self):
        self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.p.is_leader = false
        self.ha.fetch_node_status = lambda e: (e, True, True, 0)  # accessible, in_recovery
        self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.fetch_node_status = lambda e: (e, True, False, 0)  # accessible, not in_recovery
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.fetch_node_status = lambda e: (e, True, True, 1)  # accessible, in_recovery, xlog location ahead
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.p.check_replication_lag = false
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))

    @patch('requests.get', requests_get)
    def test_fetch_node_status(self):
        member = Member(0, 'test', 1, {'api_url': 'http://127.0.0.1:8011/patroni'})
        self.ha.fetch_node_status(member)
        member = Member(0, 'test', 1, {'api_url': 'http://localhost:8011/patroni'})
        self.ha.fetch_node_status(member)
class TestEtcd(unittest.TestCase):
    """Unit tests for the Etcd DCS adapter, with all client I/O stubbed.

    NOTE(review): recovered from a mangled/partially masked dump.  The
    ``setUp`` body had been destroyed by credential masking
    ("http://*****:*****@...") and is restored from the identical clean
    fixture preserved at the top of this file; the test methods are the
    original code, reformatted.
    """

    def setUp(self):
        # Restored from the clean copy of this fixture (see file head).
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])
            self.etcd = Etcd('foo', {'namespace': '/patroni/', 'ttl': 30,
                                     'host': 'localhost:2379', 'scope': 'test'})
            self.etcd.client.write = etcd_write
            self.etcd.client.read = etcd_read
            # delete always fails, so tests exercising deletion expect False
            self.etcd.client.delete = Mock(side_effect=etcd.EtcdException())

    @patch('dns.resolver.query', dns_query)
    def test_get_etcd_client(self):
        with patch.object(etcd.Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(side_effect=etcd.EtcdException)
            with patch('time.sleep', Mock(side_effect=SleepException())):
                self.assertRaises(SleepException, self.etcd.get_etcd_client, {'discovery_srv': 'test'})

    def test_get_cluster(self):
        self.assertIsInstance(self.etcd.get_cluster(), Cluster)
        self.etcd._base_path = '/service/nocluster'
        cluster = self.etcd.get_cluster()
        self.assertIsInstance(cluster, Cluster)
        self.assertIsNone(cluster.leader)

    def test_current_leader(self):
        self.assertIsInstance(self.etcd.current_leader(), Leader)
        self.etcd._base_path = '/service/noleader'
        self.assertIsNone(self.etcd.current_leader())

    def test_touch_member(self):
        self.assertFalse(self.etcd.touch_member('', ''))

    def test_take_leader(self):
        self.assertFalse(self.etcd.take_leader())

    def test_attempt_to_acquire_leader(self):
        self.etcd._base_path = '/service/exists'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())
        self.etcd._base_path = '/service/failed'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())

    def test_write_leader_optime(self):
        self.etcd.write_leader_optime('0')

    def test_update_leader(self):
        self.assertTrue(self.etcd.update_leader())

    def test_initialize(self):
        self.assertFalse(self.etcd.initialize())

    def test_cancel_initializion(self):
        self.assertFalse(self.etcd.cancel_initialization())

    def test_delete_leader(self):
        self.assertFalse(self.etcd.delete_leader())

    def test_watch(self):
        self.etcd.client.watch = etcd_watch
        self.etcd.watch(0)
        self.etcd.get_cluster()
        self.etcd.watch(1.5)
        self.etcd.watch(4.5)
        self.etcd.watch(9.5)
        self.etcd.watch(100)

    @patch('patroni.etcd.Etcd.retry', Mock(side_effect=AttributeError("foo")))
    def test_other_exceptions(self):
        self.assertRaises(EtcdError, self.etcd.cancel_initialization)
def setUp(self): with patch.object(Client, 'machines') as mock_machines: mock_machines.__get__ = Mock(return_value=['http://localhost:2379', 'http://localhost:4001']) self.etcd = Etcd('foo', {'namespace': '/patroni/', 'ttl': 30, 'host': 'localhost:2379', 'scope': 'test'})
class TestHa(unittest.TestCase):
    """HA state-machine tests driven through a real Postgresql object
    configured against a dummy data directory.

    NOTE(review): recovered from a mangled/partially masked dump.  The
    ``setUp`` body had been destroyed by credential masking
    ("http://*****:*****@...") and is restored from the identical clean
    fixture preserved at the top of this file; everything else is the
    original code, reformatted.  Assertion strings that the mangling split
    across physical lines have been rejoined.
    """

    @patch('socket.getaddrinfo', socket_getaddrinfo)
    @patch.object(etcd.Client, 'read', etcd_read)
    def setUp(self):
        # Restored from the clean copy of this fixture (see file head).
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
            self.p = Postgresql({'name': 'postgresql0', 'scope': 'dummy', 'listen': '127.0.0.1:5432',
                                 'data_dir': 'data/postgresql0', 'superuser': {}, 'admin': {},
                                 'replication': {'username': '', 'password': '', 'network': ''}})
            self.p.set_state('running')
            self.p.set_role('replica')
            self.p.check_replication_lag = true
            self.p.can_create_replica_without_replication_connection = MagicMock(return_value=False)
            self.e = Etcd('foo', {'ttl': 30, 'host': 'ok:2379', 'scope': 'test'})
            self.ha = Ha(MockPatroni(self.p, self.e))
            self.ha._async_executor.run_async = run_async
            self.ha.old_cluster = self.e.get_cluster()
            self.ha.cluster = get_cluster_not_initialized_without_leader()
            self.ha.load_cluster_from_dcs = Mock()

    @patch('sys.exit', return_value=1)
    @patch('patroni.ha.Ha.sysid_valid', MagicMock(return_value=True))
    def test_sysid_no_match(self, exit_mock):
        self.ha.run_cycle()
        exit_mock.assert_called_once_with(1)

    @patch.object(Cluster, 'is_unlocked', Mock(return_value=False))
    def test_start_as_readonly(self):
        self.p.is_leader = false
        self.p.is_healthy = true
        self.ha.has_lock = true
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')

    def test_acquire_lock_as_master(self):
        self.assertEquals(self.ha.run_cycle(), 'acquired session lock as a leader')

    def test_promoted_by_acquiring_lock(self):
        self.ha.is_healthiest_node = true
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')

    def test_demote_after_failing_to_obtain_lock(self):
        self.ha.acquire_lock = false
        self.assertEquals(self.ha.run_cycle(), 'demoted self after trying and failing to obtain lock')

    def test_follow_new_leader_after_failing_to_obtain_lock(self):
        self.ha.is_healthiest_node = true
        self.ha.acquire_lock = false
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'following new leader after trying and failing to obtain lock')

    def test_demote_because_not_healthiest(self):
        self.ha.is_healthiest_node = false
        self.assertEquals(self.ha.run_cycle(), 'demoting self because i am not the healthiest node')

    def test_follow_new_leader_because_not_healthiest(self):
        self.ha.is_healthiest_node = false
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')

    def test_promote_because_have_lock(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader because i had the session lock')

    def test_leader_with_lock(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')

    def test_demote_because_not_having_lock(self):
        self.ha.cluster.is_unlocked = false
        self.assertEquals(self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader')

    def test_demote_because_update_lock_failed(self):
        self.ha.cluster.is_unlocked = false
        self.ha.has_lock = true
        self.ha.update_lock = false
        self.assertEquals(self.ha.run_cycle(), 'demoting self because i do not have the lock and i was a leader')

    def test_follow(self):
        self.ha.cluster.is_unlocked = false
        self.p.is_leader = false
        self.assertEquals(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')
        self.ha.patroni.replicatefrom = "foo"
        self.assertEquals(self.ha.run_cycle(), 'no action. i am a secondary and i am following a leader')

    def test_no_etcd_connection_master_demote(self):
        self.ha.load_cluster_from_dcs = Mock(side_effect=DCSError('Etcd is not responding properly'))
        self.assertEquals(self.ha.run_cycle(), 'demoted self because DCS is not accessible and i was a leader')

    def test_bootstrap_from_another_member(self):
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEquals(self.ha.bootstrap(), 'trying to bootstrap from replica \'other\'')

    def test_bootstrap_waiting_for_leader(self):
        self.ha.cluster = get_cluster_initialized_without_leader()
        self.assertEquals(self.ha.bootstrap(), 'waiting for leader to bootstrap')

    def test_bootstrap_without_leader(self):
        self.ha.cluster = get_cluster_initialized_without_leader()
        self.p.can_create_replica_without_replication_connection = MagicMock(return_value=True)
        self.assertEquals(self.ha.bootstrap(), 'trying to bootstrap (without leader)')

    def test_bootstrap_initialize_lock_failed(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.assertEquals(self.ha.bootstrap(), 'failed to acquire initialize lock')

    def test_bootstrap_initialized_new_cluster(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.e.initialize = true
        self.assertEquals(self.ha.bootstrap(), 'initialized a new cluster')

    def test_bootstrap_release_initialize_key_on_failure(self):
        self.ha.cluster = get_cluster_not_initialized_without_leader()
        self.e.initialize = true
        self.p.bootstrap = Mock(side_effect=PostgresException("Could not bootstrap master PostgreSQL"))
        self.assertRaises(PostgresException, self.ha.bootstrap)

    def test_reinitialize(self):
        self.ha.schedule_reinitialize()
        self.ha.schedule_reinitialize()
        self.ha.run_cycle()
        self.assertIsNone(self.ha._async_executor.scheduled_action)
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.ha.has_lock = true
        self.ha.schedule_reinitialize()
        self.ha.run_cycle()
        self.assertIsNone(self.ha._async_executor.scheduled_action)
        self.ha.has_lock = false
        self.ha.schedule_reinitialize()
        self.ha.run_cycle()

    def test_restart(self):
        self.assertEquals(self.ha.restart(), (True, 'restarted successfully'))
        self.p.restart = false
        self.assertEquals(self.ha.restart(), (False, 'restart failed'))
        self.ha.schedule_reinitialize()
        self.assertEquals(self.ha.restart(), (False, 'reinitialize already in progress'))

    def test_restart_in_progress(self):
        self.ha._async_executor.schedule('restart', True)
        self.assertTrue(self.ha.restart_scheduled())
        self.assertEquals(self.ha.run_cycle(), 'not healthy enough for leader race')
        self.ha.cluster = get_cluster_initialized_with_leader()
        self.assertEquals(self.ha.run_cycle(), 'restart in progress')
        self.ha.has_lock = true
        self.assertEquals(self.ha.run_cycle(), 'updated leader lock during restart')
        self.ha.update_lock = false
        self.assertEquals(self.ha.run_cycle(), 'failed to update leader lock during restart')

    @patch('requests.get', requests_get)
    @patch('time.sleep', Mock())
    def test_manual_failover_from_leader(self):
        self.ha.has_lock = true
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', '', None))
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', self.p.name, None))
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, '', 'blabla', None))
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        f = Failover(0, self.p.name, '', None)
        self.ha.cluster = get_cluster_initialized_with_leader(f)
        self.assertEquals(self.ha.run_cycle(), 'manual failover: demoting myself')
        self.ha.fetch_node_status = lambda e: (e, True, True, 0, {'nofailover': 'True'})
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        # manual failover from the previous leader to us won't happen if we hold the nofailover flag
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, None))
        self.assertEquals(self.ha.run_cycle(), 'no action. i am the leader with the lock')
        # Failover scheduled time must include timezone
        scheduled = datetime.datetime.now()
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
        self.ha.run_cycle()
        scheduled = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
        scheduled = scheduled + datetime.timedelta(seconds=30)
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
        scheduled = scheduled + datetime.timedelta(seconds=-600)
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())
        scheduled = None
        self.ha.cluster = get_cluster_initialized_with_leader(Failover(0, 'blabla', self.p.name, scheduled))
        self.assertEquals('no action. i am the leader with the lock', self.ha.run_cycle())

    @patch('requests.get', requests_get)
    def test_manual_failover_process_no_leader(self):
        self.p.is_leader = false
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', self.p.name, None))
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'leader', None))
        self.p.set_role('replica')
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
        self.ha.fetch_node_status = lambda e: (e, True, True, 0, {})  # accessible, in_recovery
        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, self.p.name, '', None))
        self.assertEquals(self.ha.run_cycle(), 'following a different leader because i am not the healthiest node')
        self.ha.fetch_node_status = lambda e: (e, False, True, 0, {})  # inaccessible, in_recovery
        self.p.set_role('replica')
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
        # set failover flag to True for all members of the cluster
        # this should elect the current member, as we are not going to call the API for it.
        self.ha.cluster = get_cluster_initialized_without_leader(failover=Failover(0, '', 'other', None))
        self.ha.fetch_node_status = lambda e: (e, True, True, 0, {'nofailover': 'True'})  # accessible, in_recovery
        self.p.set_role('replica')
        self.assertEquals(self.ha.run_cycle(), 'promoted self to leader by acquiring session lock')
        # same as previous, but set the current member to nofailover. In no case it should be elected as a leader
        self.ha.patroni.nofailover = True
        self.assertEquals(self.ha.run_cycle(), 'following a different leader because I am not allowed to promote')

    def test_is_healthiest_node(self):
        self.ha.state_handler.is_leader = false
        self.ha.patroni.nofailover = False
        self.ha.fetch_node_status = lambda e: (e, True, True, 0, {})
        self.assertTrue(self.ha.is_healthiest_node())

    def test__is_healthiest_node(self):
        self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.p.is_leader = false
        self.ha.fetch_node_status = lambda e: (e, True, True, 0, {})  # accessible, in_recovery
        self.assertTrue(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.fetch_node_status = lambda e: (e, True, False, 0, {})  # accessible, not in_recovery
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.fetch_node_status = lambda e: (e, True, True, 1, {})  # accessible, in_recovery, xlog location ahead
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.p.check_replication_lag = false
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.patroni.nofailover = True
        self.assertFalse(self.ha._is_healthiest_node(self.ha.old_cluster.members))
        self.ha.patroni.nofailover = False

    @patch('requests.get', requests_get)
    def test_fetch_node_status(self):
        member = Member(0, 'test', 1, {'api_url': 'http://127.0.0.1:8011/patroni'})
        self.ha.fetch_node_status(member)
        member = Member(0, 'test', 1, {'api_url': 'http://localhost:8011/patroni'})
        self.ha.fetch_node_status(member)

    def test_post_recover(self):
        self.p.is_running = false
        self.ha.has_lock = true
        self.assertEqual(self.ha.post_recover(), 'removed leader key after trying and failing to start postgres')
        self.ha.has_lock = false
        self.assertEqual(self.ha.post_recover(), 'failed to start postgres')
        self.p.is_running = true
        self.assertIsNone(self.ha.post_recover())
class TestCtl(unittest.TestCase): @patch("socket.getaddrinfo", socket_getaddrinfo) @patch.object(Client, "machines") def setUp(self, mock_machines): mock_machines.__get__ = Mock(return_value=["http://*****:*****@patch("psycopg2.connect", psycopg2_connect) def test_get_cursor(self): c = get_cursor(get_cluster_initialized_without_leader(), role="master") assert c is None c = get_cursor(get_cluster_initialized_with_leader(), role="master") assert c is not None c = get_cursor(get_cluster_initialized_with_leader(), role="replica") # # MockCursor returns pg_is_in_recovery as false assert c is None c = get_cursor(get_cluster_initialized_with_leader(), role="any") assert c is not None def test_output_members(self): cluster = get_cluster_initialized_with_leader() output_members(cluster, name="abc", format="pretty") output_members(cluster, name="abc", format="json") output_members(cluster, name="abc", format="tsv") @patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_with_leader())) @patch("patroni.etcd.Etcd.get_etcd_client", Mock(return_value=None)) @patch("patroni.etcd.Etcd.set_failover_value", Mock(return_value=None)) @patch("patroni.ctl.wait_for_leader", Mock(return_value=get_cluster_initialized_with_leader())) @patch("requests.get", requests_get) @patch("requests.post", requests_get) @patch("patroni.ctl.post_patroni", Mock(return_value=MockResponse())) def test_failover(self): runner = CliRunner() with patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_with_leader())): result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader other y""", ) assert "Failing over to new leader" in result.output result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader other N""", ) assert "Aborting failover" in str(result.exception) result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader leader y""", ) assert "target and source are the same" 
in str(result.exception) result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader Reality y""", ) assert "Reality does not exist" in str(result.exception) result = runner.invoke(ctl, ["failover", "dummy", "--force"]) assert "Failing over to new leader" in result.output result = runner.invoke(ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="dummy") assert "is not the leader of cluster" in str(result.exception) with patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_with_only_leader())): result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader other y""", ) assert "No candidates found to failover to" in str(result.exception) with patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_without_leader())): result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader other y""", ) assert "This cluster has no master" in str(result.exception) with patch("patroni.ctl.post_patroni", Mock(side_effect=Exception())): result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader other y""", ) assert "falling back to DCS" in result.output assert "Failover failed" in result.output mocked = Mock() mocked.return_value.status_code = 500 with patch("patroni.ctl.post_patroni", Mock(return_value=mocked)): result = runner.invoke( ctl, ["failover", "dummy", "--dcs", "8.8.8.8"], input="""leader other y""", ) assert "Failover failed, details" in result.output # with patch('patroni.dcs.AbstractDCS.get_cluster', Mock(return_value=get_cluster_initialized_with_leader())): # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8'], input='nonsense') # assert 'is not the leader of cluster' in str(result.exception) # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8', '--master', 'nonsense']) # assert 'is not the leader of cluster' in str(result.exception) # result = runner.invoke(ctl, 
['failover', 'alpha', '--dcs', '8.8.8.8'], input='leader\nother\nn') # assert 'Aborting failover' in str(result.exception) # with patch('patroni.ctl.wait_for_leader', Mock(return_value = get_cluster_initialized_with_leader())): # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8'], input='leader\nother\nY') # assert 'master did not change after' in result.output # result = runner.invoke(ctl, ['failover', 'alpha', '--dcs', '8.8.8.8'], input='leader\nother\nY') # assert 'Failover failed' in result.output def test_(self): self.assertRaises(patroni.exceptions.PatroniCtlException, get_dcs, {"scheme": "dummy"}, "dummy") @patch("psycopg2.connect", psycopg2_connect) @patch("patroni.ctl.query_member", Mock(return_value=([["mock column"]], None))) def test_query(self): runner = CliRunner() with patch("patroni.ctl.get_dcs", Mock(return_value=self.e)): result = runner.invoke(ctl, ["query", "alpha", "--member", "abc", "--role", "master"]) assert "mutually exclusive" in str(result.exception) with runner.isolated_filesystem(): dummy_file = open("dummy", "w") dummy_file.write("SELECT 1") dummy_file.close() result = runner.invoke(ctl, ["query", "alpha", "--file", "dummy", "--command", "dummy"]) assert "mutually exclusive" in str(result.exception) result = runner.invoke(ctl, ["query", "alpha", "--file", "dummy"]) os.remove("dummy") result = runner.invoke(ctl, ["query", "alpha", "--command", "SELECT 1"]) assert "mock column" in result.output @patch("patroni.ctl.get_cursor", Mock(return_value=MockConnect().cursor())) def test_query_member(self): rows = query_member(None, None, None, "master", "SELECT pg_is_in_recovery()") assert "False" in str(rows) rows = query_member(None, None, None, "replica", "SELECT pg_is_in_recovery()") assert rows == (None, None) with patch("patroni.ctl.get_cursor", Mock(return_value=None)): rows = query_member(None, None, None, None, "SELECT pg_is_in_recovery()") assert "No connection to" in str(rows) rows = query_member(None, None, None, 
"replica", "SELECT pg_is_in_recovery()") assert "No connection to" in str(rows) with patch("patroni.ctl.get_cursor", Mock(side_effect=psycopg2.OperationalError("bla"))): rows = query_member(None, None, None, "replica", "SELECT pg_is_in_recovery()") with patch("test_postgresql.MockCursor.execute", Mock(side_effect=psycopg2.OperationalError("bla"))): rows = query_member(None, None, None, "replica", "SELECT pg_is_in_recovery()") @patch("patroni.dcs.AbstractDCS.get_cluster", Mock(return_value=get_cluster_initialized_with_leader())) def test_dsn(self): runner = CliRunner() with patch("patroni.ctl.get_dcs", Mock(return_value=self.e)): result = runner.invoke(ctl, ["dsn", "alpha", "--dcs", "8.8.8.8"]) assert "host=127.0.0.1 port=5435" in result.output result = runner.invoke(ctl, ["dsn", "alpha", "--role", "master", "--member", "dummy"]) assert "mutually exclusive" in str(result.exception) result = runner.invoke(ctl, ["dsn", "alpha", "--member", "dummy"]) assert "Can not find" in str(result.exception) # result = runner.invoke(ctl, ['dsn', 'alpha', '--dcs', '8.8.8.8', '--role', 'replica']) # assert 'host=127.0.0.1 port=5436' in result.output @patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_with_leader())) @patch("patroni.etcd.Etcd.get_etcd_client", Mock(return_value=None)) @patch("requests.get", requests_get) @patch("requests.post", requests_get) def test_restart_reinit(self): runner = CliRunner() result = runner.invoke(ctl, ["restart", "alpha", "--dcs", "8.8.8.8"], input="y") result = runner.invoke(ctl, ["reinit", "alpha", "--dcs", "8.8.8.8"], input="y") result = runner.invoke(ctl, ["restart", "alpha", "--dcs", "8.8.8.8"], input="N") result = runner.invoke(ctl, ["restart", "alpha", "--dcs", "8.8.8.8", "dummy", "--any"], input="y") assert "not a member" in str(result.exception) with patch("requests.post", Mock(return_value=MockResponse())): result = runner.invoke(ctl, ["restart", "alpha", "--dcs", "8.8.8.8"], input="y") 
@patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_with_leader())) @patch("patroni.etcd.Etcd.get_etcd_client", Mock(return_value=None)) def test_remove(self): runner = CliRunner() result = runner.invoke(ctl, ["remove", "alpha", "--dcs", "8.8.8.8"], input="alpha\nslave") assert "Please confirm" in result.output assert "You are about to remove all" in result.output assert "You did not exactly type" in str(result.exception) result = runner.invoke( ctl, ["remove", "alpha", "--dcs", "8.8.8.8"], input="""alpha Yes I am aware slave""", ) assert "You did not specify the current master of the cluster" in str(result.exception) result = runner.invoke(ctl, ["remove", "alpha", "--dcs", "8.8.8.8"], input="beta\nleader") assert "Cluster names specified do not match" in str(result.exception) with patch("patroni.etcd.Etcd.get_cluster", get_cluster_initialized_with_leader): result = runner.invoke( ctl, ["remove", "alpha", "--dcs", "8.8.8.8"], input="""alpha Yes I am aware leader""", ) assert "object has no attribute" in str(result.exception) with patch("patroni.ctl.get_dcs", Mock(return_value=Mock())): result = runner.invoke( ctl, ["remove", "alpha", "--dcs", "8.8.8.8"], input="""alpha Yes I am aware leader""", ) assert "We have not implemented this for DCS of type" in str(result.exception) @patch("patroni.etcd.Etcd.watch", Mock(return_value=None)) @patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_with_leader())) def test_wait_for_leader(self): dcs = self.e self.assertRaises(patroni.exceptions.PatroniCtlException, wait_for_leader, dcs, 0) cluster = wait_for_leader(dcs=dcs, timeout=2) assert cluster.leader.member.name == "leader" def test_post_patroni(self): member = get_cluster_initialized_with_leader().leader.member self.assertRaises(requests.exceptions.ConnectionError, post_patroni, member, "dummy", {}) def test_ctl(self): runner = CliRunner() runner.invoke(ctl, ["list"]) result = runner.invoke(ctl, ["--help"]) assert 
"Usage:" in result.output def test_get_any_member(self): m = get_any_member(get_cluster_initialized_without_leader(), role="master") assert m is None m = get_any_member(get_cluster_initialized_with_leader(), role="master") assert m.name == "leader" def test_get_all_members(self): r = list(get_all_members(get_cluster_initialized_without_leader(), role="master")) assert len(r) == 0 r = list(get_all_members(get_cluster_initialized_with_leader(), role="master")) assert len(r) == 1 assert r[0].name == "leader" r = list(get_all_members(get_cluster_initialized_with_leader(), role="replica")) assert len(r) == 1 assert r[0].name == "other" r = list(get_all_members(get_cluster_initialized_without_leader(), role="replica")) assert len(r) == 2 @patch("patroni.etcd.Etcd.get_cluster", Mock(return_value=get_cluster_initialized_with_leader())) @patch("patroni.etcd.Etcd.get_etcd_client", Mock(return_value=None)) @patch("requests.get", requests_get) @patch("requests.post", requests_get) def test_members(self): runner = CliRunner() result = runner.invoke(members, ["alpha"]) assert result.exit_code == 0 def test_configure(self): runner = CliRunner() result = runner.invoke(configure, ["--dcs", "abc", "-c", "dummy", "-n", "bla"]) assert result.exit_code == 0
class TestEtcd(unittest.TestCase):
    """Unit tests for the Etcd DCS adapter (patroni.etcd.Etcd).

    The etcd client is stubbed out: reads/writes go to the module-level
    etcd_read/etcd_write fakes and delete always raises EtcdException.
    """

    def setUp(self):
        # NOTE(review): the original setUp body was corrupted in the source
        # (a credential-style redaction swallowed part of it); reconstructed
        # from the identical sibling setUp at the top of this file -- verify
        # against upstream before merging.
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://localhost:2379', 'http://localhost:4001'])
            self.etcd = Etcd('foo', {'namespace': '/patroni/', 'ttl': 30,
                                     'host': 'localhost:2379', 'scope': 'test'})
            self.etcd.client.write = etcd_write
            self.etcd.client.read = etcd_read
            self.etcd.client.delete = Mock(side_effect=etcd.EtcdException())

    @patch('dns.resolver.query', dns_query)
    def test_get_etcd_client(self):
        """When machine discovery keeps failing, get_etcd_client retries until
        interrupted (simulated by making time.sleep raise SleepException)."""
        with patch.object(etcd.Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(side_effect=etcd.EtcdException)
            with patch('time.sleep', Mock(side_effect=SleepException())):
                self.assertRaises(SleepException, self.etcd.get_etcd_client, {'discovery_srv': 'test'})

    def test_get_cluster(self):
        """get_cluster returns a Cluster; with no data the leader is None."""
        self.assertIsInstance(self.etcd.get_cluster(), Cluster)
        self.etcd._base_path = '/service/nocluster'
        cluster = self.etcd.get_cluster()
        self.assertIsInstance(cluster, Cluster)
        self.assertIsNone(cluster.leader)

    def test_current_leader(self):
        """current_leader yields a Leader, or None when no leader key exists."""
        self.assertIsInstance(self.etcd.current_leader(), Leader)
        self.etcd._base_path = '/service/noleader'
        self.assertIsNone(self.etcd.current_leader())

    def test_touch_member(self):
        # write is stubbed to fail, so touching a member reports failure
        self.assertFalse(self.etcd.touch_member('', ''))

    def test_take_leader(self):
        self.assertFalse(self.etcd.take_leader())

    def test_attempt_to_acquire_leader(self):
        """Acquisition fails both when the leader key already exists and when
        the write itself fails."""
        self.etcd._base_path = '/service/exists'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())
        self.etcd._base_path = '/service/failed'
        self.assertFalse(self.etcd.attempt_to_acquire_leader())

    def test_write_leader_optime(self):
        # best-effort write; must not raise even though the client is stubbed
        self.etcd.write_leader_optime('0')

    def test_update_leader(self):
        self.assertTrue(self.etcd.update_leader())

    def test_initialize(self):
        self.assertFalse(self.etcd.initialize())

    def test_cancel_initialization(self):
        # renamed from 'test_cancel_initializion' (typo); discovery by the
        # 'test_' prefix is unaffected
        self.assertFalse(self.etcd.cancel_initialization())

    def test_delete_leader(self):
        # client.delete is stubbed to raise EtcdException -> reported as False
        self.assertFalse(self.etcd.delete_leader())

    def test_watch(self):
        """watch must cope with zero, fractional and large timeout values."""
        self.etcd.client.watch = etcd_watch
        self.etcd.watch(0)
        self.etcd.get_cluster()
        self.etcd.watch(1.5)
        self.etcd.watch(4.5)
        self.etcd.watch(9.5)
        self.etcd.watch(100)

    @patch('patroni.etcd.Etcd.retry', Mock(side_effect=AttributeError("foo")))
    def test_other_exceptions(self):
        """Unexpected exceptions from retry are surfaced as EtcdError."""
        self.assertRaises(EtcdError, self.etcd.cancel_initialization)