Example #1
0
 def test_get_zk_hosts_directly(self):
     """Verify that zk hosts passed directly to the manager are used."""
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     manager = KazooClientManager(ZK_HOSTS)
     expected_hosts = ",".join(ZK_HOSTS)
     self.assertEqual(manager.get_client().hosts, expected_hosts)
Example #2
0
 def test_serverset_destroy(self):
     """_destroy() should remove the endpoint's znode from zookeeper.

     Two endpoints join, the first is destroyed, and afterwards no child
     under the destroy path may still end with that endpoint.  The local
     file is rewritten by hand to simulate the daemon that mirrors server
     set changes to disk.
     """
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     client = KazooClientManager().get_client()
     client.start()
     fd, tmp_file = tempfile.mkstemp()
     # mkstemp() returns an open OS-level descriptor that was previously
     # leaked; close it right away -- the file is reopened by path below.
     os.close(fd)
     try:
         server_set = ServerSet(
             ServerSetWithFileTestCase.SERVER_SET_DESTROY_PATH,
             ZK_HOSTS,
             waiting_in_secs=0.01)
         server_set.join(ServerSetWithFileTestCase.PORT_1, use_ip=False)
         server_set.join(ServerSetWithFileTestCase.PORT_2, use_ip=False)
         # update the local file manually here, suppose there is a daemon
         with open(tmp_file, 'w') as f:
             f.write(ServerSetWithFileTestCase.END_POINT_1 +
                     "\n" +
                     ServerSetWithFileTestCase.END_POINT_2)
         # Give time to let server set join to do its magic.
         gevent.sleep(1)
         server_set._destroy(ServerSetWithFileTestCase.END_POINT_1)
         # update the local file manually here, suppose there is a daemon
         with open(tmp_file, 'w') as f:
             f.write(ServerSetWithFileTestCase.END_POINT_2)
         gevent.sleep(1)
         children = client.get_children(
             ServerSetWithFileTestCase.SERVER_SET_DESTROY_PATH)
         for child in children:
             self.assertFalse(
                 child.endswith(ServerSetWithFileTestCase.END_POINT_1))
     finally:
         # Clean up the file watch and temp file even if an assertion fails.
         self.FILE_WATCH._clear_all_watches()
         os.remove(tmp_file)
Example #3
0
 def test_serverset_destroy(self):
     """_destroy() should remove the endpoint's znode from zookeeper.

     Two endpoints join, the first is destroyed, and afterwards no child
     under the destroy path may still end with that endpoint.  The local
     file is rewritten by hand to simulate the daemon that mirrors server
     set changes to disk.
     """
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     client = KazooClientManager().get_client()
     client.start()
     fd, tmp_file = tempfile.mkstemp()
     # mkstemp() returns an open OS-level descriptor that was previously
     # leaked; close it right away -- the file is reopened by path below.
     os.close(fd)
     try:
         server_set = ServerSet(
             ServerSetWithFileTestCase.SERVER_SET_DESTROY_PATH,
             ZK_HOSTS,
             waiting_in_secs=0.01)
         server_set.join(ServerSetWithFileTestCase.PORT_1, use_ip=False)
         server_set.join(ServerSetWithFileTestCase.PORT_2, use_ip=False)
         # update the local file manually here, suppose there is a daemon
         with open(tmp_file, 'w') as f:
             f.write(ServerSetWithFileTestCase.END_POINT_1 + "\n" +
                     ServerSetWithFileTestCase.END_POINT_2)
         # Give time to let server set join to do its magic.
         gevent.sleep(1)
         server_set._destroy(ServerSetWithFileTestCase.END_POINT_1)
         # update the local file manually here, suppose there is a daemon
         with open(tmp_file, 'w') as f:
             f.write(ServerSetWithFileTestCase.END_POINT_2)
         gevent.sleep(1)
         children = client.get_children(
             ServerSetWithFileTestCase.SERVER_SET_DESTROY_PATH)
         for child in children:
             self.assertFalse(
                 child.endswith(ServerSetWithFileTestCase.END_POINT_1))
     finally:
         # Clean up the file watch and temp file even if an assertion fails.
         self.FILE_WATCH._clear_all_watches()
         os.remove(tmp_file)
Example #4
0
 def test_random_host_selector_with_serverset(self):
     """RandomHostSelector should track live hosts in the server set."""
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     zk_client = KazooClientManager().get_client()
     zk_client.ensure_path(HostSelectorTestCase.SERVER_SET_PATH)
     provider = HostsProvider(HostSelectorTestCase.PORT_LIST,
                              HostSelectorTestCase.SERVER_SET_PATH)
     self.assertTrue(provider.initialized)
     self.assertTrue(provider.hosts)
     # With no live hosts in the server set yet, the provider must fall
     # back to the static host list.
     self.assertEqual(provider._current_host_tuple,
                      provider._static_host_tuple)
     selector = RandomHostSelector(provider,
                                   expire_time=0,
                                   retry_time=0,
                                   invalidation_threshold=1.0)
     self.assertTrue(selector.get_host() in HostSelectorTestCase.PORT_LIST)
     server_set = ServerSet(HostSelectorTestCase.SERVER_SET_PATH, ZK_HOSTS)
     greenlet = server_set.join(HostSelectorTestCase.PORT_LIST[0],
                                use_ip=False)
     greenlet.get()
     iterations = 100
     # Once the first endpoint joins, the selector should switch over to
     # hosts from the server set only.
     distinct = set(selector.get_host() for _ in xrange(iterations))
     self.assertEqual(len(distinct), 1)
     self.assertEqual(len(provider.hosts), 1)
     greenlet = server_set.join(HostSelectorTestCase.PORT_LIST[1],
                                use_ip=False)
     greenlet.get()
     # After the second endpoint joins, the selector should hand out both
     # endpoints.
     distinct = set(selector.get_host() for _ in xrange(iterations))
     self.assertEqual(len(distinct), 2)
     self.assertEqual(len(provider.hosts), 2)
Example #5
0
    def test_server_set(self):
        """Test various failure scenarios on server set implementation.

        1. When a new server joins the set, the watcher should be notified.
        2. When the underlying zk client disconnects and then recovers,
           the server set should be transparent to server set participants
           and watchers.
        3. When the underlying zk client messes up beyond recovery,
           the underlying client should be replaced, and this should be
           transparent to server set participants and watchers.

        """
        # Most recent child list reported by the watcher; mutated in place
        # so the closure below can rewrite it.
        all_children = []
        watcher_triggered = Event()

        def server_set_watcher(children):
            # Replace the recorded children and wake up the waiting test.
            while all_children:
                all_children.pop()
            for child in children:
                all_children.append(child)
            watcher_triggered.set()

        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        client = KazooClientManager().get_client()
        server_set = ServerSet(ServerSetTestCase.SERVER_SET_PATH,
                               ZK_HOSTS,
                               waiting_in_secs=0.01)
        # join()/monitor() are asynchronous; the trailing .join() blocks
        # until the returned task has run.
        server_set.join(ServerSetTestCase.PORT_1, use_ip=True).join()
        server_set.monitor(server_set_watcher).join()
        watcher_triggered.wait(1)
        # Now the server set should only contain end point 1
        self.assertEqual(all_children, [ServerSetTestCase.END_POINT_1])
        watcher_triggered.clear()
        server_set.join(ServerSetTestCase.PORT_2, use_ip=True).join()
        watcher_triggered.wait(1)
        all_children.sort()
        # Now the server set should contain both end point 1 and 2
        self.assertEqual(all_children, ServerSetTestCase.END_POINTS)
        # Test recoverable failure
        client.stop()
        watcher_triggered.clear()
        client.start()
        watcher_triggered.wait(1)
        # Server set should remain the same when the client recovers
        all_children.sort()
        self.assertEqual(all_children, ServerSetTestCase.END_POINTS)
        # Test client change
        client.stop()
        watcher_triggered.clear()
        # give the monit greenlet a chance to detect failures
        gevent.sleep(1)
        # Assert the client has been replaced with a new one
        self.assertFalse(KazooClientManager().get_client() is client)
        watcher_triggered.wait(1)
        # Server set should survive the underlying client being swapped out
        all_children.sort()
        self.assertEqual(all_children, ServerSetTestCase.END_POINTS)
Example #6
0
    def test_server_set(self):
        """Test various failure scenarios on server set implementation.

        1. When a new server joins the set, the watcher should be notified.
        2. When the underlying zk client disconnects and then recovers,
           the server set should be transparent to server set participants
           and watchers.
        3. When the underlying zk client messes up beyond recovery,
           the underlying client should be replaced, and this should be
           transparent to server set participants and watchers.

        """
        current_children = []
        notified = Event()

        def on_children_change(children):
            # Swap in the newly reported children and wake the test up.
            del current_children[:]
            current_children.extend(children)
            notified.set()

        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        client = KazooClientManager().get_client()
        server_set = ServerSet(ServerSetTestCase.SERVER_SET_PATH,
                               ZK_HOSTS,
                               waiting_in_secs=0.01)
        server_set.join(ServerSetTestCase.PORT_1, use_ip=True).join()
        server_set.monitor(on_children_change).join()
        notified.wait(1)
        # Only end point 1 should be present at this point.
        self.assertEqual(current_children, [ServerSetTestCase.END_POINT_1])
        notified.clear()
        server_set.join(ServerSetTestCase.PORT_2, use_ip=True).join()
        notified.wait(1)
        current_children.sort()
        # Both end points should be present now.
        self.assertEqual(current_children, ServerSetTestCase.END_POINTS)
        # Recoverable failure: stop and restart the same client.
        client.stop()
        notified.clear()
        client.start()
        notified.wait(1)
        # The set must be unchanged after the client recovers.
        current_children.sort()
        self.assertEqual(current_children, ServerSetTestCase.END_POINTS)
        # Unrecoverable failure: the client should get swapped out.
        client.stop()
        notified.clear()
        # give the monit greenlet a chance to detect failures
        gevent.sleep(1)
        # A brand new client should have replaced the dead one.
        self.assertFalse(KazooClientManager().get_client() is client)
        notified.wait(1)
        # The server set must survive the client replacement.
        current_children.sort()
        self.assertEqual(current_children, ServerSetTestCase.END_POINTS)
Example #7
0
 def test_serverset_destroy(self):
     """Destroyed endpoints must no longer appear under the zk path."""
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     client = KazooClientManager().get_client()
     server_set = ServerSet(ServerSetTestCase.SERVER_SET_DESTROY_PATH,
                            ZK_HOSTS,
                            waiting_in_secs=0.01)
     server_set.join(ServerSetTestCase.PORT_1, use_ip=False)
     server_set.join(ServerSetTestCase.PORT_2, use_ip=False)
     # Allow the asynchronous joins time to complete.
     gevent.sleep(1)
     server_set._destroy(ServerSetTestCase.END_POINT_1)
     gevent.sleep(1)
     remaining = client.get_children(
         ServerSetTestCase.SERVER_SET_DESTROY_PATH)
     for node in remaining:
         self.assertFalse(node.endswith(ServerSetTestCase.END_POINT_1))
Example #8
0
 def test_serverset_destroy(self):
     """Removing an endpoint via _destroy should delete its znode."""
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     client = KazooClientManager().get_client()
     server_set = ServerSet(ServerSetTestCase.SERVER_SET_DESTROY_PATH,
                            ZK_HOSTS,
                            waiting_in_secs=0.01)
     server_set.join(ServerSetTestCase.PORT_1, use_ip=False)
     server_set.join(ServerSetTestCase.PORT_2, use_ip=False)
     # Give time to let server set join to do its magic.
     gevent.sleep(1)
     server_set._destroy(ServerSetTestCase.END_POINT_1)
     gevent.sleep(1)
     children = client.get_children(
         ServerSetTestCase.SERVER_SET_DESTROY_PATH)
     # No remaining znode may correspond to the destroyed end point.
     for child in children:
         self.assertFalse(child.endswith(ServerSetTestCase.END_POINT_1))
Example #9
0
 def test_random_host_selector_with_serverset(self):
     """RandomHostSelector should follow live membership of the set."""
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     zk_client = KazooClientManager().get_client()
     zk_client.ensure_path(HostSelectorTestCase.SERVER_SET_PATH)
     provider = HostsProvider(HostSelectorTestCase.PORT_LIST,
                              HostSelectorTestCase.SERVER_SET_PATH)
     self.assertTrue(provider.initialized)
     self.assertTrue(provider.hosts)
     # No live hosts in the server set yet, so the provider must still be
     # serving the static host list.
     self.assertEqual(provider._current_host_tuple,
                      provider._static_host_tuple)
     selector = RandomHostSelector(
         provider, expire_time=0, retry_time=0,
         invalidation_threshold=1.0)
     self.assertTrue(selector.get_host() in HostSelectorTestCase.PORT_LIST)
     server_set = ServerSet(HostSelectorTestCase.SERVER_SET_PATH, ZK_HOSTS)
     join_task = server_set.join(HostSelectorTestCase.PORT_LIST[0],
                                 use_ip=False)
     join_task.get()
     sample_size = 100
     # After the first endpoint joins, the selector should draw hosts from
     # the server set exclusively.
     picks = set(selector.get_host() for _ in xrange(sample_size))
     self.assertEqual(len(picks), 1)
     self.assertEqual(len(provider.hosts), 1)
     join_task = server_set.join(HostSelectorTestCase.PORT_LIST[1],
                                 use_ip=False)
     join_task.get()
     # With a second endpoint in the set, both should now be returned.
     picks = set(selector.get_host() for _ in xrange(sample_size))
     self.assertEqual(len(picks), 2)
     self.assertEqual(len(provider.hosts), 2)
Example #10
0
    def test_data_watcher(self):
        """Test various scenarios for data watcher:

        1. When data get changed, watcher callback should be invoked.
        2. When the underlying zk client disconnects and then recovers,
           the watcher callback should be invoked.
        3. When the underlying zk client messes up beyond recovery,
           the underlying client should be replaced, and once the new client
           is in place, the watcher callback should be invoked again.

        """
        # Latest (data, stat) pair delivered to the callback; emptied and
        # refilled in place by the closure below.
        data_stat = []
        watcher_triggered = Event()

        def data_watch(data, stat):
            # Record the newest data/stat and wake the waiting test.
            while data_stat:
                data_stat.pop()
            data_stat.append(data)
            data_stat.append(stat)
            watcher_triggered.set()

        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        client = KazooClientManager().get_client()
        client.create(DataWatcherTestCase.TEST_PATH,
                      DataWatcherTestCase.DATA_0)
        data_watcher = DataWatcher(DataWatcherTestCase.TEST_PATH,
                                   ZK_HOSTS,
                                   waiting_in_secs=0.01)
        # watch() is asynchronous; .join() blocks until it is installed.
        data_watcher.watch(data_watch).join()
        watcher_triggered.wait(1)
        # Now the data and version should be foo and 0.
        self.assertEqual(data_stat[0], DataWatcherTestCase.DATA_0)
        self.assertEqual(data_stat[1].version, 0)
        watcher_triggered.clear()
        client.set(DataWatcherTestCase.TEST_PATH, DataWatcherTestCase.DATA_1)
        watcher_triggered.wait(1)
        # Make sure that watch callback is triggered.
        self.assertEqual(data_stat[0], DataWatcherTestCase.DATA_1)
        self.assertEqual(data_stat[1].version, 1)
        data_stat.pop()
        data_stat.pop()
        # Test recoverable failure
        watcher_triggered.clear()
        client.stop()
        client.start()
        # Here the client actually will call check the znode in the
        # background.
        watcher_triggered.wait(1)
        # Since nothing changed, no notification from the client.
        self.assertFalse(data_stat)
        # Test client change
        client.stop()
        watcher_triggered.clear()
        # give the monit greenlet a chance to detect failures.
        gevent.sleep(1)
        # Assert the client has been replaced with a new one.
        self.assertFalse(KazooClientManager().get_client() is client)
        watcher_triggered.wait(1)
        # Make sure that watch callback is triggered when client is replaced.
        self.assertEqual(data_stat[0], DataWatcherTestCase.DATA_1)
        self.assertEqual(data_stat[1].version, 1)
Example #11
0
 def test_get_zk_hosts_directly(self):
     """Zk hosts handed straight to the manager should be honored."""
     testutil.initialize_kazoo_client_manager(ZK_HOSTS)
     manager = KazooClientManager(ZK_HOSTS)
     joined_hosts = ",".join(ZK_HOSTS)
     self.assertEqual(manager.get_client().hosts, joined_hosts)
Example #12
0
    def test_server_set(self):
        """Test various failure scenarios on server set implementation.

        1. When a new server joins the set, the watcher should be notified.
           In practice there is a daemon monitoring the server set change in
           zk and update the local file.
        2. When the underlying zk client disconnects and then recovers,
           the server set should be transparent to server set participants
           and watchers.
        3. When the underlying zk client messes up beyond recovery,
           it should be transparent to server set participants and watchers.

        Although when a local file is being watched, now all the code paths
        about the above behaviors got affected, we still want to test all the
        scenarios to make sure nothing breaks when a file is used.

        NOTE: to simulate the behavior in practice, when a server joins or
        leaves, we assume that there is a daemon to make the corresponding
        change to the local file.
        """
        fd, tmp_file = tempfile.mkstemp()
        # mkstemp() returns an open OS-level descriptor that was previously
        # leaked; close it right away -- the file is reopened by path below.
        os.close(fd)
        all_children = []
        watcher_triggered = Event()

        def server_set_watcher(children):
            # Replace the recorded children and wake up the waiting test.
            while all_children:
                all_children.pop()
            for child in children:
                all_children.append(child)
            watcher_triggered.set()

        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        client = KazooClientManager().get_client()
        try:
            server_set = ServerSet(ServerSetWithFileTestCase.SERVER_SET_PATH,
                                   ZK_HOSTS,
                                   waiting_in_secs=0.01,
                                   file_path=tmp_file)
            server_set.join(ServerSetWithFileTestCase.PORT_1,
                            use_ip=False).join()
            # update the local file manually here, suppose there is a daemon
            with open(tmp_file, 'w') as f:
                f.write(ServerSetWithFileTestCase.END_POINT_1)
            gevent.sleep(1)
            server_set.monitor(server_set_watcher).join()
            watcher_triggered.wait(1)
            # Now the server set should only contain end point 1
            self.assertEqual(all_children,
                             [ServerSetWithFileTestCase.END_POINT_1])
            watcher_triggered.clear()
            server_set.join(ServerSetWithFileTestCase.PORT_2,
                            use_ip=False).join()
            # update the local file manually here, suppose there is a daemon
            with open(tmp_file, 'w') as f:
                f.write(ServerSetWithFileTestCase.END_POINT_1 +
                        "\n" +
                        ServerSetWithFileTestCase.END_POINT_2)
            gevent.sleep(1)
            watcher_triggered.wait(1)
            all_children.sort()
            # Now the server set should contain both end point 1 and 2
            self.assertEqual(all_children,
                             ServerSetWithFileTestCase.END_POINTS)
            # Test recoverable failure
            client.stop()
            watcher_triggered.clear()
            client.start()
            watcher_triggered.wait(1)
            # Server set should remain the same when the client recovers
            all_children.sort()
            self.assertEqual(all_children,
                             ServerSetWithFileTestCase.END_POINTS)
            # Test client change
            client.stop()
            watcher_triggered.clear()
            # give the monit greenlet a chance to detect failures
            gevent.sleep(1)
            watcher_triggered.wait(1)
            # Server set should survive the underlying client being swapped
            # out
            all_children.sort()
            self.assertEqual(all_children,
                             ServerSetWithFileTestCase.END_POINTS)
        finally:
            # Clean up watches and the temp file even if an assertion fails.
            self.FILE_WATCH._clear_all_watches()
            os.remove(tmp_file)
Example #13
0
    def test_data_watcher(self):
        """Test various scenarios for data watcher:

        1. When data get changed, watcher callback should be invoked.
        2. When the underlying zk client disconnects and then recovers,
           the watcher callback should be invoked.
        3. When the underlying zk client messes up beyond recovery,
           the underlying client should be replaced, and once the new client
           is in place, the watcher callback should be invoked again.

        """
        observed = []
        notified = Event()

        def on_data_change(data, stat):
            # Record the newest (data, stat) pair and wake the test up.
            del observed[:]
            observed.append(data)
            observed.append(stat)
            notified.set()

        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        client = KazooClientManager().get_client()
        client.create(DataWatcherTestCase.TEST_PATH,
                      DataWatcherTestCase.DATA_0)
        watcher = DataWatcher(DataWatcherTestCase.TEST_PATH,
                              ZK_HOSTS,
                              waiting_in_secs=0.01)
        watcher.watch(on_data_change).join()
        notified.wait(1)
        # The initial callback delivers the original data at version 0.
        self.assertEqual(observed[0], DataWatcherTestCase.DATA_0)
        self.assertEqual(observed[1].version, 0)
        notified.clear()
        client.set(DataWatcherTestCase.TEST_PATH, DataWatcherTestCase.DATA_1)
        notified.wait(1)
        # The update should have fired the callback with the new data.
        self.assertEqual(observed[0], DataWatcherTestCase.DATA_1)
        self.assertEqual(observed[1].version, 1)
        observed.pop()
        observed.pop()
        # Recoverable failure: restart the same client.
        notified.clear()
        client.stop()
        client.start()
        # Here the client actually will call check the znode in the
        # background.
        notified.wait(1)
        # Since nothing changed, no notification from the client.
        self.assertFalse(observed)
        # Unrecoverable failure: the client should get swapped out.
        client.stop()
        notified.clear()
        # give the monit greenlet a chance to detect failures.
        gevent.sleep(1)
        # A brand new client should have replaced the dead one.
        self.assertFalse(KazooClientManager().get_client() is client)
        notified.wait(1)
        # The callback fires again once the replacement client is in place.
        self.assertEqual(observed[0], DataWatcherTestCase.DATA_1)
        self.assertEqual(observed[1].version, 1)
Example #14
0
    def test_server_set(self):
        """Test various failure scenarios on server set implementation.

        1. When a new server joins the set, the watcher should be notified.
           In practice there is a daemon monitoring the server set change in
           zk and update the local file.
        2. When the underlying zk client disconnects and then recovers,
           the server set should be transparent to server set participants
           and watchers.
        3. When the underlying zk client messes up beyond recovery,
           it should be transparent to server set participants and watchers.

        Although when a local file is being watched, now all the code paths
        about the above behaviors got affected, we still want to test all the
        scenarios to make sure nothing breaks when a file is used.

        NOTE: to simulate the behavior in practice, when a server joins or
        leaves, we assume that there is a daemon to make the corresponding
        change to the local file.
        """
        fd, tmp_file = tempfile.mkstemp()
        # mkstemp() returns an open OS-level descriptor that was previously
        # leaked; close it right away -- the file is reopened by path below.
        os.close(fd)
        all_children = []
        watcher_triggered = Event()

        def server_set_watcher(children):
            # Replace the recorded children and wake up the waiting test.
            while all_children:
                all_children.pop()
            for child in children:
                all_children.append(child)
            watcher_triggered.set()

        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        client = KazooClientManager().get_client()
        try:
            server_set = ServerSet(ServerSetWithFileTestCase.SERVER_SET_PATH,
                                   ZK_HOSTS,
                                   waiting_in_secs=0.01,
                                   file_path=tmp_file)
            server_set.join(ServerSetWithFileTestCase.PORT_1,
                            use_ip=False).join()
            # update the local file manually here, suppose there is a daemon
            with open(tmp_file, 'w') as f:
                f.write(ServerSetWithFileTestCase.END_POINT_1)
            gevent.sleep(1)
            server_set.monitor(server_set_watcher).join()
            watcher_triggered.wait(1)
            # Now the server set should only contain end point 1
            self.assertEqual(all_children,
                             [ServerSetWithFileTestCase.END_POINT_1])
            watcher_triggered.clear()
            server_set.join(ServerSetWithFileTestCase.PORT_2,
                            use_ip=False).join()
            # update the local file manually here, suppose there is a daemon
            with open(tmp_file, 'w') as f:
                f.write(ServerSetWithFileTestCase.END_POINT_1 + "\n" +
                        ServerSetWithFileTestCase.END_POINT_2)
            gevent.sleep(1)
            watcher_triggered.wait(1)
            all_children.sort()
            # Now the server set should contain both end point 1 and 2
            self.assertEqual(all_children,
                             ServerSetWithFileTestCase.END_POINTS)
            # Test recoverable failure
            client.stop()
            watcher_triggered.clear()
            client.start()
            watcher_triggered.wait(1)
            # Server set should remain the same when the client recovers
            all_children.sort()
            self.assertEqual(all_children,
                             ServerSetWithFileTestCase.END_POINTS)
            # Test client change
            client.stop()
            watcher_triggered.clear()
            # give the monit greenlet a chance to detect failures
            gevent.sleep(1)
            watcher_triggered.wait(1)
            # Server set should survive the underlying client being swapped
            # out
            all_children.sort()
            self.assertEqual(all_children,
                             ServerSetWithFileTestCase.END_POINTS)
        finally:
            # Clean up watches and the temp file even if an assertion fails.
            self.FILE_WATCH._clear_all_watches()
            os.remove(tmp_file)