def test_server_set(self):
    """Test various failure scenarios on server set implementation.

    1. When a new server joins the set, the watcher should be
       notified.
    2. When the underlying zk client disconnects and then recovers,
       the server set should be transparent to server set
       participants and watchers.
    3. When the underlying zk client messes up beyond recovery, the
       underlying client should be replaced, and this should be
       transparent to server set participants and watchers.
    """
    observed_children = []
    change_event = Event()

    def on_change(children):
        # Mutate the shared list in place so the enclosing test sees
        # exactly what the most recent notification delivered.
        del observed_children[:]
        observed_children.extend(children)
        change_event.set()

    testutil.initialize_kazoo_client_manager(ZK_HOSTS)
    client = KazooClientManager().get_client()
    server_set = ServerSet(ServerSetTestCase.SERVER_SET_PATH,
                           ZK_HOSTS,
                           waiting_in_secs=0.01)
    server_set.join(ServerSetTestCase.PORT_1, use_ip=True).join()
    server_set.monitor(on_change).join()
    change_event.wait(1)
    # Now the server set should only contain end point 1
    self.assertEqual(observed_children, [ServerSetTestCase.END_POINT_1])

    change_event.clear()
    server_set.join(ServerSetTestCase.PORT_2, use_ip=True).join()
    change_event.wait(1)
    observed_children.sort()
    # Now the server set should contain both end point 1 and 2
    self.assertEqual(observed_children, ServerSetTestCase.END_POINTS)

    # Test recoverable failure
    client.stop()
    change_event.clear()
    client.start()
    change_event.wait(1)
    # Server set should remain the same when the client recovers
    observed_children.sort()
    self.assertEqual(observed_children, ServerSetTestCase.END_POINTS)

    # Test client change
    client.stop()
    change_event.clear()
    # give the monit greenlet a chance to detect failures
    gevent.sleep(1)
    # Assert the client has been replaced with a new one
    self.assertFalse(KazooClientManager().get_client() is client)
    change_event.wait(1)
    # Server set should survive the underlying client being swapped out
    observed_children.sort()
    self.assertEqual(observed_children, ServerSetTestCase.END_POINTS)
def test_data_watcher(self):
    """Test various scenarios for data watcher:

    1. When data get changed, watcher callback should be invoked.
    2. When the underlying zk client disconnects and then recovers,
       the watcher callback should be invoked.
    3. When the underlying zk client messes up beyond recovery, the
       underlying client should be replaced, and once the new client
       is in place, the watcher callback should be invoked again.
    """
    observed = []
    notified = Event()

    def on_data_change(data, stat):
        # Keep only the latest (data, stat) pair in the shared list.
        del observed[:]
        observed.append(data)
        observed.append(stat)
        notified.set()

    testutil.initialize_kazoo_client_manager(ZK_HOSTS)
    client = KazooClientManager().get_client()
    client.create(DataWatcherTestCase.TEST_PATH,
                  DataWatcherTestCase.DATA_0)
    data_watcher = DataWatcher(DataWatcherTestCase.TEST_PATH,
                               ZK_HOSTS,
                               waiting_in_secs=0.01)
    data_watcher.watch(on_data_change).join()
    notified.wait(1)
    # Now the data and version should be foo and 0.
    self.assertEqual(observed[0], DataWatcherTestCase.DATA_0)
    self.assertEqual(observed[1].version, 0)

    notified.clear()
    client.set(DataWatcherTestCase.TEST_PATH, DataWatcherTestCase.DATA_1)
    notified.wait(1)
    # Make sure that watch callback is triggered.
    self.assertEqual(observed[0], DataWatcherTestCase.DATA_1)
    self.assertEqual(observed[1].version, 1)
    del observed[:]

    # Test recoverable failure
    notified.clear()
    client.stop()
    client.start()
    # Here the client actually will call check the znode in the
    # background.
    notified.wait(1)
    # Since nothing changed, no notification from the client.
    self.assertFalse(observed)

    # Test client change
    client.stop()
    notified.clear()
    # give the monit greenlet a chance to detect failures.
    gevent.sleep(1)
    # Assert the client has been replaced with a new one.
    self.assertFalse(KazooClientManager().get_client() is client)
    notified.wait(1)
    # Make sure that watch callback is triggered when client is replaced.
    self.assertEqual(observed[0], DataWatcherTestCase.DATA_1)
    self.assertEqual(observed[1].version, 1)
def test_server_set(self):
    """Test various failure scenarios on server set implementation.

    1. When a new server joins the set, the watcher should be
       notified. In practice there is a daemon monitoring the server
       set change in zk and update the local file.
    2. When the underlying zk client disconnects and then recovers,
       the server set should be transparent to server set
       participants and watchers.
    3. When the underlying zk client messes up beyond recovery, it
       should be transparent to server set participants and watchers.

    Although when a local file is being watched, now all the code
    paths about the above behaviors got affected, we still want to
    test all the scenarios to make sure nothing breaks when a file
    is used.

    NOTE: to simulate the behavior in practice, when a server joins
    or leaves, we assume that there is a daemon to make the
    corresponding change to the local file.
    """
    fd, tmp_file = tempfile.mkstemp()
    # mkstemp returns an open OS-level fd that the caller must close;
    # we only ever re-open the file by path, so close it right away
    # to avoid leaking a file descriptor.
    os.close(fd)
    all_children = []
    watcher_triggered = Event()

    def server_set_watcher(children):
        while all_children:
            all_children.pop()
        for child in children:
            all_children.append(child)
        watcher_triggered.set()

    try:
        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        client = KazooClientManager().get_client()
        server_set = ServerSet(ServerSetWithFileTestCase.SERVER_SET_PATH,
                               ZK_HOSTS,
                               waiting_in_secs=0.01,
                               file_path=tmp_file)
        server_set.join(ServerSetWithFileTestCase.PORT_1,
                        use_ip=False).join()
        # update the local file manually here, suppose there is a daemon
        with open(tmp_file, 'w') as f:
            f.write(ServerSetWithFileTestCase.END_POINT_1)
        gevent.sleep(1)
        server_set.monitor(server_set_watcher).join()
        watcher_triggered.wait(1)
        # Now the server set should only contain end point 1
        self.assertEqual(all_children,
                         [ServerSetWithFileTestCase.END_POINT_1])

        watcher_triggered.clear()
        server_set.join(ServerSetWithFileTestCase.PORT_2,
                        use_ip=False).join()
        # update the local file manually here, suppose there is a daemon
        with open(tmp_file, 'w') as f:
            f.write(ServerSetWithFileTestCase.END_POINT_1 + "\n" +
                    ServerSetWithFileTestCase.END_POINT_2)
        gevent.sleep(1)
        watcher_triggered.wait(1)
        all_children.sort()
        # Now the server set should contain both end point 1 and 2
        self.assertEqual(all_children,
                         ServerSetWithFileTestCase.END_POINTS)

        # Test recoverable failure
        client.stop()
        watcher_triggered.clear()
        client.start()
        watcher_triggered.wait(1)
        # Server set should remain the same when the client recovers
        all_children.sort()
        self.assertEqual(all_children,
                         ServerSetWithFileTestCase.END_POINTS)

        # Test client change
        client.stop()
        watcher_triggered.clear()
        # give the monit greenlet a chance to detect failures
        gevent.sleep(1)
        watcher_triggered.wait(1)
        # Server set should survive the underlying client being
        # swapped out
        all_children.sort()
        self.assertEqual(all_children,
                         ServerSetWithFileTestCase.END_POINTS)
    finally:
        # Always clean up the file watches and the temp file, even if
        # an assertion above fails, so later tests are not affected.
        self.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)