Example 1
class KazooTreeCacheTests(KazooAdaptiveHandlerTestCase):
    def setUp(self):
        super(KazooTreeCacheTests, self).setUp()
        self._event_queue = self.client.handler.queue_impl()
        self._error_queue = self.client.handler.queue_impl()
        self.path = None
        self.cache = None

    def tearDown(self):
        if not self._error_queue.empty():
            try:
                raise self._error_queue.get()
            except FakeException:
                pass
        if self.cache is not None:
            self.cache.close()
            self.cache = None
        super(KazooTreeCacheTests, self).tearDown()

    def make_cache(self):
        if self.cache is None:
            self.path = '/' + uuid.uuid4().hex
            self.cache = TreeCache(self.client, self.path)
            self.cache.listen(lambda event: self._event_queue.put(event))
            self.cache.listen_fault(lambda error: self._error_queue.put(error))
            self.cache.start()
        return self.cache

    def wait_cache(self, expect=None, since=None, timeout=10):
        started = since is None
        while True:
            event = self._event_queue.get(timeout=timeout)
            if started:
                if expect is not None:
                    eq_(event.event_type, expect)
                return event
            if event.event_type == since:
                started = True
                if expect is None:
                    return

    def spy_client(self, method_name):
        method = getattr(self.client, method_name)
        return patch.object(self.client, method_name, wraps=method)

    def _wait_gc(self):
        # trigger switching on some coroutine handlers
        self.client.handler.sleep_func(0.1)

        completion_queue = getattr(
            self.client.handler, 'completion_queue', None)
        if completion_queue is not None:
            while not completion_queue.empty():
                self.client.handler.sleep_func(0.1)

        for gen in range(3):
            gc.collect(gen)

    def count_tree_node(self):
        # run the GC and count TreeNode instances to check for memory leaks
        for retry in range(10):
            result = set()
            for _ in range(5):
                self._wait_gc()
                result.add(count_refs_by_type('TreeNode'))
            if len(result) == 1:
                return list(result)[0]
        raise RuntimeError('could not count refs exactly')

    def test_start(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        stat = self.client.exists(self.path)
        eq_(stat.version, 0)

        eq_(self.cache._state, TreeCache.STATE_STARTED)
        eq_(self.cache._root._state, TreeNode.STATE_LIVE)

    @raises(KazooException)
    def test_start_started(self):
        self.make_cache()
        self.cache.start()

    @raises(KazooException)
    def test_start_closed(self):
        self.make_cache()
        self.cache.start()
        self.cache.close()
        self.cache.start()

    def test_close(self):
        eq_(self.count_tree_node(), 0)

        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', makepath=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_ADDED)

        # set up stub watchers that live outside of the tree cache
        stub_data_watcher = Mock(spec=lambda event: None)
        stub_child_watcher = Mock(spec=lambda event: None)
        self.client.get(self.path + '/foo', stub_data_watcher)
        self.client.get_children(self.path + '/foo', stub_child_watcher)

        # watchers registered by the tree cache should be present as well
        root_path = self.client.chroot + self.path
        eq_(len(self.client._data_watchers[root_path + '/foo']), 2)
        eq_(len(self.client._data_watchers[root_path + '/foo/bar']), 1)
        eq_(len(self.client._data_watchers[root_path + '/foo/bar/baz']), 1)
        eq_(len(self.client._child_watchers[root_path + '/foo']), 2)
        eq_(len(self.client._child_watchers[root_path + '/foo/bar']), 1)
        eq_(len(self.client._child_watchers[root_path + '/foo/bar/baz']), 1)

        self.cache.close()

        # nothing should be published once the tree is closed
        ok_(self._event_queue.empty())

        # tree should be empty
        eq_(self.cache._root._children, {})
        eq_(self.cache._root._data, None)
        eq_(self.cache._state, TreeCache.STATE_CLOSED)

        # node state should not be changed
        assert_not_equal(self.cache._root._state, TreeNode.STATE_DEAD)

        # watchers should be reset
        eq_(len(self.client._data_watchers[root_path + '/foo']), 1)
        eq_(len(self.client._data_watchers[root_path + '/foo/bar']), 0)
        eq_(len(self.client._data_watchers[root_path + '/foo/bar/baz']), 0)
        eq_(len(self.client._child_watchers[root_path + '/foo']), 1)
        eq_(len(self.client._child_watchers[root_path + '/foo/bar']), 0)
        eq_(len(self.client._child_watchers[root_path + '/foo/bar/baz']), 0)

        # outside watchers should not be deleted
        eq_(
            list(self.client._data_watchers[root_path + '/foo'])[0],
            stub_data_watcher)
        eq_(
            list(self.client._child_watchers[root_path + '/foo'])[0],
            stub_child_watcher)

        # there should be no leaked TreeNode instances at this point
        self.cache = None
        eq_(self.count_tree_node(), 0)

    def test_delete_operation(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        eq_(self.count_tree_node(), 1)

        self.client.create(self.path + '/foo/bar/baz', makepath=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_ADDED)

        self.client.delete(self.path + '/foo', recursive=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_REMOVED)

        # tree should be empty
        eq_(self.cache._root._children, {})

        # watchers should be reset
        root_path = self.client.chroot + self.path
        eq_(self.client._data_watchers[root_path + '/foo'], set())
        eq_(self.client._data_watchers[root_path + '/foo/bar'], set())
        eq_(self.client._data_watchers[root_path + '/foo/bar/baz'], set())
        eq_(self.client._child_watchers[root_path + '/foo'], set())
        eq_(self.client._child_watchers[root_path + '/foo/bar'], set())
        eq_(self.client._child_watchers[root_path + '/foo/bar/baz'], set())

        # there should be no leaked TreeNode instances at this point
        eq_(self.count_tree_node(), 1)

    def test_children_operation(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/test_children', b'test_children_1')
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_type, TreeEvent.NODE_ADDED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_1')
        eq_(event.event_data.stat.version, 0)

        self.client.set(self.path + '/test_children', b'test_children_2')
        event = self.wait_cache(TreeEvent.NODE_UPDATED)
        eq_(event.event_type, TreeEvent.NODE_UPDATED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_2')
        eq_(event.event_data.stat.version, 1)

        self.client.delete(self.path + '/test_children')
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        eq_(event.event_type, TreeEvent.NODE_REMOVED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_2')
        eq_(event.event_data.stat.version, 1)

    def test_subtree_operation(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/foo/bar/baz', makepath=True)
        for relative_path in ('/foo', '/foo/bar', '/foo/bar/baz'):
            event = self.wait_cache(TreeEvent.NODE_ADDED)
            eq_(event.event_type, TreeEvent.NODE_ADDED)
            eq_(event.event_data.path, self.path + relative_path)
            eq_(event.event_data.data, b'')
            eq_(event.event_data.stat.version, 0)

        self.client.delete(self.path + '/foo', recursive=True)
        for relative_path in ('/foo/bar/baz', '/foo/bar', '/foo'):
            event = self.wait_cache(TreeEvent.NODE_REMOVED)
            eq_(event.event_type, TreeEvent.NODE_REMOVED)
            eq_(event.event_data.path, self.path + relative_path)

    def test_get_data(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', b'@', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_data(self.path).data, b'')
            eq_(cache.get_data(self.path).stat.version, 0)

            eq_(cache.get_data(self.path + '/foo').data, b'')
            eq_(cache.get_data(self.path + '/foo').stat.version, 0)

            eq_(cache.get_data(self.path + '/foo/bar').data, b'')
            eq_(cache.get_data(self.path + '/foo/bar').stat.version, 0)

            eq_(cache.get_data(self.path + '/foo/bar/baz').data, b'@')
            eq_(cache.get_data(self.path + '/foo/bar/baz').stat.version, 0)

    def test_get_children(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', b'@', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_children(self.path + '/foo/bar/baz'), frozenset())
            eq_(cache.get_children(self.path + '/foo/bar'), frozenset(['baz']))
            eq_(cache.get_children(self.path + '/foo'), frozenset(['bar']))
            eq_(cache.get_children(self.path), frozenset(['foo']))

    @raises(ValueError)
    def test_get_data_out_of_tree(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.cache.get_data('/out_of_tree')

    @raises(ValueError)
    def test_get_children_out_of_tree(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.cache.get_children('/out_of_tree')

    def test_get_data_no_node(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_data(self.path + '/non_exists'), None)

    def test_get_children_no_node(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_children(self.path + '/non_exists'), None)

    def test_session_reconnected(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/foo')
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_data.path, self.path + '/foo')

        with self.spy_client('get_async') as get_data:
            with self.spy_client('get_children_async') as get_children:
                # session suspended
                self.lose_connection(self.client.handler.event_object)
                self.wait_cache(TreeEvent.CONNECTION_SUSPENDED)

                # A series of refresh operations happens here, but NODE_ADDED
                # events will not be raised because the zxid of each node
                # stays the same across the reconnection.

                # connection restored
                self.wait_cache(TreeEvent.CONNECTION_RECONNECTED)

                # wait for outstanding operations
                while self.cache._outstanding_ops > 0:
                    self.client.handler.sleep_func(0.1)

                # inspect in-memory nodes
                _node_root = self.cache._root
                _node_foo = self.cache._root._children['foo']

                # make sure that all nodes are refreshed
                get_data.assert_has_calls([
                    call(self.path, watch=_node_root._process_watch),
                    call(self.path + '/foo', watch=_node_foo._process_watch),
                ], any_order=True)
                get_children.assert_has_calls([
                    call(self.path, watch=_node_root._process_watch),
                    call(self.path + '/foo', watch=_node_foo._process_watch),
                ], any_order=True)

    def test_root_recreated(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        # remove root node
        self.client.delete(self.path)
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        eq_(event.event_type, TreeEvent.NODE_REMOVED)
        eq_(event.event_data.data, b'')
        eq_(event.event_data.path, self.path)
        eq_(event.event_data.stat.version, 0)

        # re-create root node
        self.client.ensure_path(self.path)
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_type, TreeEvent.NODE_ADDED)
        eq_(event.event_data.data, b'')
        eq_(event.event_data.path, self.path)
        eq_(event.event_data.stat.version, 0)

        self.assertTrue(
            self.cache._outstanding_ops >= 0,
            'unexpected outstanding ops %r' % self.cache._outstanding_ops)

    def test_exception_handler(self):
        error_value = FakeException()
        error_handler = Mock()

        with patch.object(TreeNode, 'on_deleted') as on_deleted:
            on_deleted.side_effect = [error_value]

            self.make_cache()
            self.cache.listen_fault(error_handler)

            self.cache.close()
            error_handler.assert_called_once_with(error_value)

    def test_exception_suppressed(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        # provoke a ConnectionClosedError
        self.client.stop()
        self.client.close()
        self.client.handler.start()  # keep async completions running
        self.wait_cache(since=TreeEvent.CONNECTION_LOST)

        with patch.object(TreeNode, 'on_created') as on_created:
            self.cache._root._call_client('exists', '/')
            self.cache._root._call_client('get', '/')
            self.cache._root._call_client('get_children', '/')

            self.wait_cache(since=TreeEvent.INITIALIZED)
            on_created.assert_not_called()
            eq_(self.cache._outstanding_ops, 0)
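
For reference, the TreeCache API exercised by this test class (listen, listen_fault, start, get_data, get_children, close) can be wired together in a few lines outside of a test harness. The snippet below is a minimal sketch only: it assumes a ZooKeeper server reachable at 127.0.0.1:2181 and that TreeCache is importable from kazoo.recipe.cache, as in current kazoo releases; the path '/some/subtree' is made up.

from kazoo.client import KazooClient
from kazoo.recipe.cache import TreeCache

client = KazooClient(hosts='127.0.0.1:2181')
client.start()

cache = TreeCache(client, '/some/subtree')
# print every cache event; real code would usually filter on event.event_type
cache.listen(lambda event: print(event.event_type, event.event_data))
# surface background errors instead of silently dropping them
cache.listen_fault(lambda error: print('cache fault:', error))
cache.start()

# later: read from the in-memory copy without another round trip to the server
data = cache.get_data('/some/subtree')
children = cache.get_children('/some/subtree')

cache.close()
client.stop()
client.close()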
Example 2
class ZookeeperClusterManager(ClusterManager):
    """
    A cluster manager that manages one cluster's state and configurations
    with a Zookeeper ensemble via kazoo.

    Below is the structure of the znodes:
        /needlestack
            /<CLUSTER_NAME_1>
                /live_nodes
                    /<HOSTPORT_1>
                    /<HOSTPORT_2>
                    /<HOSTPORT_3>
                    /<HOSTPORT_4>
                    ...
                /collections
                    /<COLLECTION_NAME_1>
                        /shards
                            /<SHARD_NAME_1>
                                /replicas
                                    /<HOSTPORT_2>
                                    /<HOSTPORT_4>
                            /<SHARD_NAME_2>
                                /replicas
                                    /<HOSTPORT_1>
                                    /<HOSTPORT_3>
                    /<COLLECTION_NAME_2>
                        ...
    """

    cluster_name: str
    hostport: str
    zk: KazooClient
    cache: TreeCache

    def __init__(self, cluster_name: str, hostport: str, hosts: List[str],
                 zookeeper_root: str):
        self.cluster_name = cluster_name
        self.hostport = hostport
        self.zookeeper_root = zookeeper_root
        self.zk = KazooClient(hosts=hosts)
        self.zk.add_listener(self.zk_listener)
        self.cache = TreeCache(self.zk, self.base_znode)

    @property
    def base_znode(self):
        return f"{self.zookeeper_root}/{self.cluster_name}"

    @property
    def live_nodes_znode(self):
        return f"{self.base_znode}/live_nodes"

    @property
    def this_node_znode(self):
        return f"{self.base_znode}/live_nodes/{self.hostport}"

    @property
    def collections_znode(self):
        return f"{self.base_znode}/collections"

    def collection_znode(self, collection_name: str) -> str:
        return f"{self.collections_znode}/{collection_name}"

    def shard_znode(self, collection_name: str, shard_name: str = None) -> str:
        znode = f"{self.collections_znode}/{collection_name}/shards"
        if shard_name:
            znode += "/" + shard_name
        return znode

    def replica_znode(self,
                      collection_name: str,
                      shard_name: str,
                      hostport: str = None) -> str:
        shard_znode = self.shard_znode(collection_name, shard_name)
        znode = f"{shard_znode}/replicas"
        if hostport:
            znode += "/" + hostport
        return znode

    def startup(self):
        self.zk.start()
        self.cache.start()
        signal.signal(signal.SIGINT, self.signal_listener)
        signal.signal(signal.SIGTERM, self.signal_listener)
        self.zk.ensure_path(self.live_nodes_znode)
        self.zk.ensure_path(self.collections_znode)

    def shutdown(self):
        self.cache.close()
        self.zk.stop()
        self.zk.close()

    def cleanup(self):
        logger.info(f"Removing ZNodes via cleanup")
        transaction = self.zk.transaction()

        for collection in self.list_local_collections():
            for shard in collection.shards:
                for replica in shard.replicas:
                    znode = self.replica_znode(collection.name, shard.name,
                                               replica.hostport)
                    transaction.delete(znode)

        self.commit_transaction(transaction)

    def register_merger(self):
        pass

    def register_searcher(self):
        try:
            retrier = KazooRetry(max_tries=5, delay=1, backoff=2, max_delay=20)
            retrier(self.zk.create,
                    self.this_node_znode,
                    ephemeral=True,
                    makepath=True)
            logger.info(f"Created ephemeral ZNode {self.this_node_znode}")
        except kazoo.retry.RetryFailedError:
            logger.error(
                f"Max retries reached for creating ephemeral ZNode {self.this_node_znode}"
            )
        except kazoo.retry.InterruptedError:
            logger.error(
                f"Retries interrupted for creating ephemeral ZNode {self.this_node_znode}"
            )

    def set_state(self,
                  state,
                  collection_name=None,
                  shard_name=None,
                  hostport=None):
        transaction = self.zk.transaction()

        collections = [collection_name] if collection_name else None
        for collection in self._list_collections(collections,
                                                 hostport=hostport,
                                                 load_replica=True):
            logger.info(
                f"Set {collection.name}/shards ZNodes to {collections_pb2.Replica.State.Name(state)}"
            )
            for shard in collection.shards:
                for replica in shard.replicas:
                    znode = self.replica_znode(collection.name, shard.name,
                                               replica.node.hostport)
                    replica.state = state
                    transaction.set_data(znode, replica.SerializeToString())

        return self.commit_transaction(transaction)

    def set_local_state(self, state, collection_name=None, shard_name=None):
        return self.set_state(state, collection_name, shard_name,
                              self.hostport)

    def signal_listener(self, signum, frame):
        self.shutdown()

    def zk_listener(self, state):
        if state == KazooState.LOST:
            logger.warn("Connection to Zookeeper lost")
        elif state == KazooState.SUSPENDED:
            logger.warn("Connection to Zookeeper disconnected")
        else:
            logger.info("Connection to Zookeeper established")

    def add_collections(self, collections):
        """Configure a list of collections into Zookeeper
        """
        transaction = self.zk.transaction()

        for collection in collections:
            collection_copy = deepcopy(collection)
            collection_copy.ClearField("shards")
            collection_znode = self.collection_znode(collection.name)
            transaction.create(collection_znode,
                               collection_copy.SerializeToString())
            transaction.create(self.shard_znode(collection.name))
            for shard in collection.shards:
                shard_copy = deepcopy(shard)
                shard_copy.ClearField("replicas")
                shard_znode = self.shard_znode(collection.name, shard.name)
                transaction.create(shard_znode, shard_copy.SerializeToString())
                transaction.create(
                    self.replica_znode(collection.name, shard.name))
                for replica in shard.replicas:
                    replica_copy = deepcopy(replica)
                    replica_copy.state = collections_pb2.Replica.BOOTING
                    replica_znode = self.replica_znode(collection.name,
                                                       shard.name,
                                                       replica.node.hostport)
                    transaction.create(replica_znode,
                                       replica_copy.SerializeToString())

        if self.commit_transaction(transaction):
            return collections
        else:
            return []

    def delete_collections(self, collection_names):
        transaction = self.zk.transaction()

        for collection_name in collection_names:
            shards_znode = self.shard_znode(collection_name)
            for shard_name in self.zk.get_children(shards_znode):
                replicas_znode = self.replica_znode(collection_name,
                                                    shard_name)
                for replica_name in self.zk.get_children(replicas_znode):
                    replica_znode = self.replica_znode(collection_name,
                                                       shard_name,
                                                       replica_name)
                    transaction.delete(replica_znode)
                transaction.delete(replicas_znode)
                transaction.delete(
                    self.shard_znode(collection_name, shard_name))
            transaction.delete(shards_znode)
            transaction.delete(self.collection_znode(collection_name))

        if self.commit_transaction(transaction):
            return collection_names
        else:
            return []

    def list_nodes(self):
        live_nodes = self.zk.get_children(self.live_nodes_znode)
        nodes = [collections_pb2.Node(hostport=node) for node in live_nodes]
        return nodes

    def list_collections(self, collection_names=None, include_state=True):
        return self._list_collections(collection_names,
                                      load_replica=include_state)

    def list_local_collections(self, include_state=True):
        return self._list_collections(hostport=self.hostport,
                                      load_replica=include_state)

    def _list_collections(
        self,
        collection_names: Optional[List[str]] = None,
        hostport: Optional[str] = None,
        load_replica: Optional[bool] = True,
    ) -> List[collections_pb2.Collection]:
        collections = []

        collection_names = collection_names or self.zk.get_children(
            self.collections_znode)
        for collection_name in collection_names:

            shards = []
            shards_znode = self.shard_znode(collection_name)
            for shard_name in self.zk.get_children(shards_znode):

                replicas = []
                replicas_znode = self.replica_znode(collection_name,
                                                    shard_name)
                for replica_hostport in self.zk.get_children(replicas_znode):
                    if hostport == replica_hostport or hostport is None:
                        replica_znode = self.replica_znode(
                            collection_name, shard_name, replica_hostport)
                        if load_replica:
                            replica_data, _ = self.zk.get(replica_znode)
                            replica_proto = collections_pb2.Replica.FromString(
                                replica_data)
                        else:
                            replica_proto = collections_pb2.Replica()
                        replicas.append(replica_proto)

                if replicas:
                    shard_znode = self.shard_znode(collection_name, shard_name)
                    shard_data, _ = self.zk.get(shard_znode)
                    shard_proto = collections_pb2.Shard.FromString(shard_data)
                    shard_proto.replicas.extend(replicas)
                    shards.append(shard_proto)

            if shards:
                collection_znode = self.collection_znode(collection_name)
                collection_data, _ = self.zk.get(collection_znode)
                collection_proto = collections_pb2.Collection.FromString(
                    collection_data)
                collection_proto.shards.extend(shards)
                collections.append(collection_proto)

        return collections

    def get_searchers(self, collection_name, shard_names=None):
        if not shard_names:
            shards_znode = self.shard_znode(collection_name)
            shard_names = self.cache.get_children(shards_znode, [])

        shard_hostports = []
        for shard_name in shard_names:
            hostports = self._get_searchers_for_shard(collection_name,
                                                      shard_name,
                                                      active=True)
            if hostports:
                shard_hostports.append((shard_name, hostports))
            else:
                logger.error(
                    f"No active Searcher node for {collection_name}/{shard_name}."
                )

        return shard_hostports

    def _get_searchers_for_shard(self,
                                 collection_name: str,
                                 shard_name: str,
                                 active: bool = True) -> List[str]:
        replicas_znode = self.replica_znode(collection_name, shard_name)
        hostports = self.cache.get_children(replicas_znode, [])

        if active:
            active_hostports = []
            for hostport in hostports:
                replica_znode = self.replica_znode(collection_name, shard_name,
                                                   hostport)
                node = self.cache.get_data(replica_znode)
                if node:
                    replica = collections_pb2.Replica.FromString(node.data)
                    if replica.state == collections_pb2.Replica.ACTIVE:
                        active_hostports.append(hostport)
            hostports = active_hostports

        return hostports

    def commit_transaction(
            self, transaction: kazoo.client.TransactionRequest) -> bool:
        """Commit a transaction and log the first exception after rollbacks"""
        for result, operation in zip(transaction.commit(),
                                     transaction.operations):
            if isinstance(result, kazoo.exceptions.RolledBackError):
                continue
            elif isinstance(result, Exception):
                logger.error(
                    f"{result.__class__.__name__} in Kazoo transaction: {operation}"
                )
                return False
        return True
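
As a rough illustration of how this manager is meant to be driven, the sketch below constructs one and walks through its lifecycle. It is illustrative only: the cluster name, hostport, ZooKeeper hosts, and collection name are made-up values, and error handling is omitted.

# Hypothetical wiring for ZookeeperClusterManager; all values are illustrative.
manager = ZookeeperClusterManager(
    cluster_name="search-cluster",
    hostport="10.0.0.5:8080",
    hosts=["zk1:2181", "zk2:2181", "zk3:2181"],
    zookeeper_root="/needlestack",
)
manager.startup()            # connect, start the TreeCache, install signal handlers

# The znode helpers compose paths matching the structure in the docstring:
#   manager.base_znode                     -> /needlestack/search-cluster
#   manager.shard_znode("col1", "shard1")  -> /needlestack/search-cluster/collections/col1/shards/shard1
#   manager.replica_znode("col1", "shard1", "10.0.0.5:8080")
#       -> .../collections/col1/shards/shard1/replicas/10.0.0.5:8080

manager.register_searcher()  # create this node's ephemeral znode under live_nodes
searchers = manager.get_searchers("col1")
manager.shutdown()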
Example 3
class KazooTreeCacheTests(KazooTestCase):
    def setUp(self):
        super(KazooTreeCacheTests, self).setUp()
        self._event_queue = self.client.handler.queue_impl()
        self._error_queue = self.client.handler.queue_impl()
        self.path = None
        self.cache = None

    def tearDown(self):
        super(KazooTreeCacheTests, self).tearDown()
        if not self._error_queue.empty():
            try:
                raise self._error_queue.get()
            except FakeException:
                pass

    def make_cache(self):
        if self.cache is None:
            self.path = '/' + uuid.uuid4().hex
            self.cache = TreeCache(self.client, self.path)
            self.cache.listen(lambda event: self._event_queue.put(event))
            self.cache.listen_fault(lambda error: self._error_queue.put(error))
            self.cache.start()
        return self.cache

    def wait_cache(self, expect=None, since=None, timeout=10):
        started = since is None
        while True:
            event = self._event_queue.get(timeout=timeout)
            if started:
                if expect is not None:
                    eq_(event.event_type, expect)
                return event
            if event.event_type == since:
                started = True
                if expect is None:
                    return

    def spy_client(self, method_name):
        method = getattr(self.client, method_name)
        return patch.object(self.client, method_name, wraps=method)

    def test_start(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        stat = self.client.exists(self.path)
        eq_(stat.version, 0)

        eq_(self.cache._state, TreeCache.STATE_STARTED)
        eq_(self.cache._root._state, TreeNode.STATE_LIVE)

    @raises(KazooException)
    def test_start_started(self):
        self.make_cache()
        self.cache.start()

    @raises(KazooException)
    def test_start_closed(self):
        self.make_cache()
        self.cache.start()
        self.cache.close()
        self.cache.start()

    def test_close(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', makepath=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_ADDED)

        self.cache.close()

        # nothing should be published once the tree is closed
        ok_(self._event_queue.empty())

        # tree should be empty
        eq_(self.cache._root._children, {})
        eq_(self.cache._root._data, None)
        eq_(self.cache._state, TreeCache.STATE_CLOSED)

        # node state should not be changed
        assert_not_equal(self.cache._root._state, TreeNode.STATE_DEAD)

    def test_children_operation(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/test_children', b'test_children_1')
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_type, TreeEvent.NODE_ADDED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_1')
        eq_(event.event_data.stat.version, 0)

        self.client.set(self.path + '/test_children', b'test_children_2')
        event = self.wait_cache(TreeEvent.NODE_UPDATED)
        eq_(event.event_type, TreeEvent.NODE_UPDATED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_2')
        eq_(event.event_data.stat.version, 1)

        self.client.delete(self.path + '/test_children')
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        eq_(event.event_type, TreeEvent.NODE_REMOVED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_2')
        eq_(event.event_data.stat.version, 1)

    def test_subtree_operation(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/foo/bar/baz', makepath=True)
        for relative_path in ('/foo', '/foo/bar', '/foo/bar/baz'):
            event = self.wait_cache(TreeEvent.NODE_ADDED)
            eq_(event.event_type, TreeEvent.NODE_ADDED)
            eq_(event.event_data.path, self.path + relative_path)
            eq_(event.event_data.data, b'')
            eq_(event.event_data.stat.version, 0)

        self.client.delete(self.path + '/foo', recursive=True)
        for relative_path in ('/foo/bar/baz', '/foo/bar', '/foo'):
            event = self.wait_cache(TreeEvent.NODE_REMOVED)
            eq_(event.event_type, TreeEvent.NODE_REMOVED)
            eq_(event.event_data.path, self.path + relative_path)

    def test_get_data(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', b'@', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_data(self.path).data, b'')
            eq_(cache.get_data(self.path).stat.version, 0)

            eq_(cache.get_data(self.path + '/foo').data, b'')
            eq_(cache.get_data(self.path + '/foo').stat.version, 0)

            eq_(cache.get_data(self.path + '/foo/bar').data, b'')
            eq_(cache.get_data(self.path + '/foo/bar').stat.version, 0)

            eq_(cache.get_data(self.path + '/foo/bar/baz').data, b'@')
            eq_(cache.get_data(self.path + '/foo/bar/baz').stat.version, 0)

    def test_get_children(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', b'@', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_children(self.path + '/foo/bar/baz'), frozenset())
            eq_(cache.get_children(self.path + '/foo/bar'), frozenset(['baz']))
            eq_(cache.get_children(self.path + '/foo'), frozenset(['bar']))
            eq_(cache.get_children(self.path), frozenset(['foo']))

    @raises(ValueError)
    def test_get_data_out_of_tree(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.cache.get_data('/out_of_tree')

    @raises(ValueError)
    def test_get_children_out_of_tree(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.cache.get_children('/out_of_tree')

    def test_get_data_no_node(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_data(self.path + '/non_exists'), None)

    def test_get_children_no_node(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_children(self.path + '/non_exists'), None)

    def test_session_reconnected(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/foo')
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_data.path, self.path + '/foo')

        with self.spy_client('get_async') as get_data:
            with self.spy_client('get_children_async') as get_children:
                # session suspended
                self.lose_connection(self.client.handler.event_object)
                self.wait_cache(TreeEvent.CONNECTION_SUSPENDED)

                # A series of refresh operations happens here, but NODE_ADDED
                # events will not be raised because the zxid of each node
                # stays the same across the reconnection.

                # connection restored
                self.wait_cache(TreeEvent.CONNECTION_RECONNECTED)

                # wait for outstanding operations
                while self.cache._outstanding_ops > 0:
                    self.client.handler.sleep_func(0.1)

                # inspect in-memory nodes
                _node_root = self.cache._root
                _node_foo = self.cache._root._children['foo']

                # make sure that all nodes are refreshed
                get_data.assert_has_calls([
                    call(self.path, watch=_node_root._process_watch),
                    call(self.path + '/foo', watch=_node_foo._process_watch),
                ], any_order=True)
                get_children.assert_has_calls([
                    call(self.path, watch=_node_root._process_watch),
                    call(self.path + '/foo', watch=_node_foo._process_watch),
                ], any_order=True)

    def test_root_recreated(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        # remove root node
        self.client.delete(self.path)
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        eq_(event.event_type, TreeEvent.NODE_REMOVED)
        eq_(event.event_data.data, b'')
        eq_(event.event_data.path, self.path)
        eq_(event.event_data.stat.version, 0)

        # re-create root node
        self.client.ensure_path(self.path)
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_type, TreeEvent.NODE_ADDED)
        eq_(event.event_data.data, b'')
        eq_(event.event_data.path, self.path)
        eq_(event.event_data.stat.version, 0)

        self.assertTrue(
            self.cache._outstanding_ops >= 0,
            'unexpected outstanding ops %r' % self.cache._outstanding_ops)

    def test_exception_handler(self):
        error_value = FakeException()
        error_handler = Mock()

        with patch.object(TreeNode, 'on_deleted') as on_deleted:
            on_deleted.side_effect = [error_value]

            self.make_cache()
            self.cache.listen_fault(error_handler)

            self.cache.close()
            error_handler.assert_called_once_with(error_value)
Example 4
class ZooKeeper(object):
    '''
    Class implementing the ZooKeeper interface.

    This class uses the facade design pattern to keep common interaction
    with the ZooKeeper API simple and consistent for the caller, and
    limits coupling between objects. It allows for more complex interactions
    by providing direct access to the client connection when needed (though
    that is discouraged). It also provides for a convenient entry point for
    testing only ZooKeeper interactions.
    '''

    log = logging.getLogger("zuul.zk.ZooKeeper")

    REQUEST_ROOT = '/nodepool/requests'
    REQUEST_LOCK_ROOT = "/nodepool/requests-lock"
    NODE_ROOT = '/nodepool/nodes'
    HOLD_REQUEST_ROOT = '/zuul/hold-requests'

    # Log zookeeper retry every 10 seconds
    retry_log_rate = 10

    def __init__(self, enable_cache=True):
        '''
        Initialize the ZooKeeper object.

        :param bool enable_cache: When True, enables caching of ZooKeeper
            objects (e.g., HoldRequests).
        '''
        self.client = None
        self._became_lost = False
        self._last_retry_log = 0
        self.enable_cache = enable_cache

        # The caching model we use is designed around handing out model
        # data as objects. To do this, we use two caches: one is a TreeCache
        # which contains raw znode data (among other details), and one for
        # storing that data serialized as objects. This allows us to return
        # objects from the APIs, and avoids calling the methods to serialize
        # the data into objects more than once.
        self._hold_request_tree = None
        self._cached_hold_requests = {}

    def _dictToStr(self, data):
        return json.dumps(data).encode('utf8')

    def _strToDict(self, data):
        return json.loads(data.decode('utf8'))

    def _connection_listener(self, state):
        '''
        Listener method for Kazoo connection state changes.

        .. warning:: This method must not block.
        '''
        if state == KazooState.LOST:
            self.log.debug("ZooKeeper connection: LOST")
            self._became_lost = True
        elif state == KazooState.SUSPENDED:
            self.log.debug("ZooKeeper connection: SUSPENDED")
        else:
            self.log.debug("ZooKeeper connection: CONNECTED")

    @property
    def connected(self):
        return self.client.state == KazooState.CONNECTED

    @property
    def suspended(self):
        return self.client.state == KazooState.SUSPENDED

    @property
    def lost(self):
        return self.client.state == KazooState.LOST

    @property
    def didLoseConnection(self):
        return self._became_lost

    def resetLostFlag(self):
        self._became_lost = False

    def logConnectionRetryEvent(self):
        now = time.monotonic()
        if now - self._last_retry_log >= self.retry_log_rate:
            self.log.warning("Retrying zookeeper connection")
            self._last_retry_log = now

    def connect(self, hosts, read_only=False, timeout=10.0):
        '''
        Establish a connection with ZooKeeper cluster.

        Convenience method if a pre-existing ZooKeeper connection is not
        supplied to the ZooKeeper object at instantiation time.

        :param str hosts: Comma-separated list of hosts to connect to (e.g.
            127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
        :param bool read_only: If True, establishes a read-only connection.
        :param float timeout: The ZooKeeper session timeout, in
            seconds (default: 10.0).
        '''
        if self.client is None:
            self.client = KazooClient(hosts=hosts,
                                      read_only=read_only,
                                      timeout=timeout)
            self.client.add_listener(self._connection_listener)
            # Manually retry initial connection attempt
            while True:
                try:
                    self.client.start(1)
                    break
                except KazooTimeoutError:
                    self.logConnectionRetryEvent()

        if self.enable_cache:
            self._hold_request_tree = TreeCache(self.client,
                                                self.HOLD_REQUEST_ROOT)
            self._hold_request_tree.listen_fault(self.cacheFaultListener)
            self._hold_request_tree.listen(self.holdRequestCacheListener)
            self._hold_request_tree.start()

    def cacheFaultListener(self, e):
        self.log.exception(e)

    def holdRequestCacheListener(self, event):
        '''
        Keep the hold request object cache in sync with the TreeCache.
        '''
        try:
            self._holdRequestCacheListener(event)
        except Exception:
            self.log.exception(
                "Exception in hold request cache update for event: %s", event)

    def _holdRequestCacheListener(self, event):
        if hasattr(event.event_data, 'path'):
            # Ignore root node
            path = event.event_data.path
            if path == self.HOLD_REQUEST_ROOT:
                return

        if event.event_type not in (TreeEvent.NODE_ADDED,
                                    TreeEvent.NODE_UPDATED,
                                    TreeEvent.NODE_REMOVED):
            return

        path = event.event_data.path
        request_id = path.rsplit('/', 1)[1]

        if event.event_type in (TreeEvent.NODE_ADDED, TreeEvent.NODE_UPDATED):
            # Requests with no data are invalid
            if not event.event_data.data:
                return

            # Perform an in-place update of the already cached request
            d = self._bytesToDict(event.event_data.data)
            old_request = self._cached_hold_requests.get(request_id)
            if old_request:
                if event.event_data.stat.version <= old_request.stat.version:
                    # Don't update to older data
                    return
                old_request.updateFromDict(d)
                old_request.stat = event.event_data.stat
            else:
                request = zuul.model.HoldRequest.fromDict(d)
                request.id = request_id
                request.stat = event.event_data.stat
                self._cached_hold_requests[request_id] = request

        elif event.event_type == TreeEvent.NODE_REMOVED:
            try:
                del self._cached_hold_requests[request_id]
            except KeyError:
                pass

    def disconnect(self):
        '''
        Close the ZooKeeper cluster connection.

        You should call this method if you used connect() to establish a
        cluster connection.
        '''
        if self._hold_request_tree is not None:
            self._hold_request_tree.close()
            self._hold_request_tree = None

        if self.client is not None and self.client.connected:
            self.client.stop()
            self.client.close()
            self.client = None

    def resetHosts(self, hosts):
        '''
        Reset the ZooKeeper cluster connection host list.

        :param str hosts: Comma-separated list of hosts to connect to (e.g.
            127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
        '''
        if self.client is not None:
            self.client.set_hosts(hosts=hosts)

    def submitNodeRequest(self, node_request, watcher):
        '''
        Submit a request for nodes to Nodepool.

        :param NodeRequest node_request: A NodeRequest with the
            contents of the request.

        :param callable watcher: A callable object that will be
            invoked each time the request is updated.  It is called
            with two arguments: (node_request, deleted) where
            node_request is the same argument passed to this method,
            and deleted is a boolean which is True if the node no
            longer exists (notably, this will happen on disconnection
            from ZooKeeper).  The watcher should return False when
            further updates are no longer necessary.
        '''
        node_request.created_time = time.time()
        data = node_request.toDict()

        path = '{}/{:0>3}-'.format(self.REQUEST_ROOT, node_request.priority)
        path = self.client.create(path,
                                  self._dictToStr(data),
                                  makepath=True,
                                  sequence=True,
                                  ephemeral=True)
        reqid = path.split("/")[-1]
        node_request.id = reqid

        def callback(data, stat):
            if data:
                self.updateNodeRequest(node_request, data)
            deleted = (data is None)  # data *are* none
            return watcher(node_request, deleted)

        self.client.DataWatch(path, callback)
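
    # A hedged sketch of the watcher contract described in submitNodeRequest's
    # docstring. The callable below is hypothetical (not part of this class);
    # it would be passed as the `watcher` argument and returns False once it
    # no longer needs updates.
    #
    #   def watcher(node_request, deleted):
    #       if deleted:
    #           handle_lost_request(node_request)  # illustrative helper
    #           return False
    #       return not request_is_complete(node_request)  # illustrative helper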

    def deleteNodeRequest(self, node_request):
        '''
        Delete a request for nodes.

        :param NodeRequest node_request: A NodeRequest with the
            contents of the request.
        '''

        path = '%s/%s' % (self.REQUEST_ROOT, node_request.id)
        try:
            self.client.delete(path)
        except kze.NoNodeError:
            pass

    def nodeRequestExists(self, node_request):
        '''
        See if a NodeRequest exists in ZooKeeper.

        :param NodeRequest node_request: A NodeRequest to verify.

        :returns: True if the request exists, False otherwise.
        '''
        path = '%s/%s' % (self.REQUEST_ROOT, node_request.id)
        if self.client.exists(path):
            return True
        return False

    def storeNodeRequest(self, node_request):
        '''Store the node request.

        The request is expected to already exist and is updated in its
        entirety.

        :param NodeRequest node_request: The request to update.
        '''

        path = '%s/%s' % (self.REQUEST_ROOT, node_request.id)
        self.client.set(path, self._dictToStr(node_request.toDict()))

    def updateNodeRequest(self, node_request, data=None):
        '''Refresh an existing node request.

        :param NodeRequest node_request: The request to update.
        :param dict data: The data to use; query ZK if absent.
        '''
        if data is None:
            path = '%s/%s' % (self.REQUEST_ROOT, node_request.id)
            data, stat = self.client.get(path)
        data = self._strToDict(data)
        request_nodes = list(node_request.nodeset.getNodes())
        for i, nodeid in enumerate(data.get('nodes', [])):
            request_nodes[i].id = nodeid
            self.updateNode(request_nodes[i])
        node_request.updateFromDict(data)

    def storeNode(self, node):
        '''Store the node.

        The node is expected to already exist and is updated in its
        entirety.

        :param Node node: The node to update.
        '''

        path = '%s/%s' % (self.NODE_ROOT, node.id)
        self.client.set(path, self._dictToStr(node.toDict()))

    def updateNode(self, node):
        '''Refresh an existing node.

        :param Node node: The node to update.
        '''

        node_path = '%s/%s' % (self.NODE_ROOT, node.id)
        node_data, node_stat = self.client.get(node_path)
        node_data = self._strToDict(node_data)
        node.updateFromDict(node_data)

    def lockNode(self, node, blocking=True, timeout=None):
        '''
        Lock a node.

        This should be called as soon as a request is fulfilled and
        the lock held for as long as the node is in-use.  It can be
        used by nodepool to detect if Zuul has gone offline and the
        node should be reclaimed.

        :param Node node: The node which should be locked.
        '''

        lock_path = '%s/%s/lock' % (self.NODE_ROOT, node.id)
        try:
            lock = Lock(self.client, lock_path)
            have_lock = lock.acquire(blocking, timeout)
        except kze.LockTimeout:
            raise LockException("Timeout trying to acquire lock %s" %
                                lock_path)

        # If we aren't blocking, it's possible we didn't get the lock
        # because someone else has it.
        if not have_lock:
            raise LockException("Did not get lock on %s" % lock_path)

        node.lock = lock

    def unlockNode(self, node):
        '''
        Unlock a node.

        The node must already have been locked.

        :param Node node: The node which should be unlocked.
        '''

        if node.lock is None:
            raise LockException("Node %s does not hold a lock" % (node, ))
        node.lock.release()
        node.lock = None
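
    # Hedged usage sketch for the lockNode/unlockNode pair above (illustrative
    # only; `zk` is an instance of this class and `node` a zuul.model.Node):
    #
    #   zk.lockNode(node, blocking=True, timeout=30)
    #   try:
    #       ...  # use the node while holding the lock
    #   finally:
    #       zk.unlockNode(node)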

    def lockNodeRequest(self, request, blocking=True, timeout=None):
        '''
        Lock a node request.

        This will set the `lock` attribute of the request object when the
        lock is successfully acquired.

        :param NodeRequest request: The request to lock.
        :param bool blocking: Whether or not to block on trying to
            acquire the lock
        :param int timeout: When blocking, how long to wait for the lock
            to get acquired. None, the default, waits forever.

        :raises: TimeoutException if we failed to acquire the lock when
            blocking with a timeout. ZKLockException if we are not blocking
            and could not get the lock, or a lock is already held.
        '''

        path = "%s/%s" % (self.REQUEST_LOCK_ROOT, request.id)
        try:
            lock = Lock(self.client, path)
            have_lock = lock.acquire(blocking, timeout)
        except kze.LockTimeout:
            raise LockException("Timeout trying to acquire lock %s" % path)
        except kze.NoNodeError:
            have_lock = False
            self.log.error("Request not found for locking: %s", request)

        # If we aren't blocking, it's possible we didn't get the lock
        # because someone else has it.
        if not have_lock:
            raise LockException("Did not get lock on %s" % path)

        request.lock = lock
        self.updateNodeRequest(request)

    def unlockNodeRequest(self, request):
        '''
        Unlock a node request.

        The request must already have been locked.

        :param NodeRequest request: The request to unlock.

        :raises: ZKLockException if the request is not currently locked.
        '''
        if request.lock is None:
            raise LockException("Request %s does not hold a lock" % request)
        request.lock.release()
        request.lock = None

    def heldNodeCount(self, autohold_key):
        '''
        Count the number of nodes being held for the given tenant/project/job.

        :param set autohold_key: A set with the tenant/project/job names.
        '''
        identifier = " ".join(autohold_key)
        try:
            nodes = self.client.get_children(self.NODE_ROOT)
        except kze.NoNodeError:
            return 0

        count = 0
        for nodeid in nodes:
            node_path = '%s/%s' % (self.NODE_ROOT, nodeid)
            try:
                node_data, node_stat = self.client.get(node_path)
            except kze.NoNodeError:
                # Node got removed on us. Just ignore.
                continue

            if not node_data:
                self.log.warning("Node ID %s has no data", nodeid)
                continue
            node_data = self._strToDict(node_data)
            if (node_data['state'] == zuul.model.STATE_HOLD
                    and node_data.get('hold_job') == identifier):
                count += 1
        return count

    # Copy of nodepool/zk.py begins here
    NODE_ROOT = "/nodepool/nodes"
    LAUNCHER_ROOT = "/nodepool/launchers"

    def _bytesToDict(self, data):
        return json.loads(data.decode('utf8'))

    def _launcherPath(self, launcher):
        return "%s/%s" % (self.LAUNCHER_ROOT, launcher)

    def _nodePath(self, node):
        return "%s/%s" % (self.NODE_ROOT, node)

    def getRegisteredLaunchers(self):
        '''
        Get a list of all launchers that have registered with ZooKeeper.

        :returns: A list of Launcher objects, or empty list if none are found.
        '''
        try:
            launcher_ids = self.client.get_children(self.LAUNCHER_ROOT)
        except kze.NoNodeError:
            return []

        objs = []
        for launcher in launcher_ids:
            path = self._launcherPath(launcher)
            try:
                data, _ = self.client.get(path)
            except kze.NoNodeError:
                # launcher disappeared
                continue

            objs.append(Launcher.fromDict(self._bytesToDict(data)))
        return objs

    def getNodes(self):
        '''
        Get the current list of all nodes.

        :returns: A list of nodes.
        '''
        try:
            return self.client.get_children(self.NODE_ROOT)
        except kze.NoNodeError:
            return []

    def getNode(self, node):
        '''
        Get the data for a specific node.

        :param str node: The node ID.

        :returns: The node data, or None if the node was not found.
        '''
        path = self._nodePath(node)
        try:
            data, stat = self.client.get(path)
        except kze.NoNodeError:
            return None
        if not data:
            return None

        d = self._bytesToDict(data)
        d['id'] = node
        return d

    def nodeIterator(self):
        '''
        Utility generator method for iterating through all nodes.
        '''
        for node_id in self.getNodes():
            node = self.getNode(node_id)
            if node:
                yield node

    def getHoldRequests(self):
        '''
        Get the current list of all hold requests.
        '''
        try:
            return sorted(self.client.get_children(self.HOLD_REQUEST_ROOT))
        except kze.NoNodeError:
            return []

    def getHoldRequest(self, hold_request_id):
        path = self.HOLD_REQUEST_ROOT + "/" + hold_request_id
        try:
            data, stat = self.client.get(path)
        except kze.NoNodeError:
            return None
        if not data:
            return None

        obj = zuul.model.HoldRequest.fromDict(self._strToDict(data))
        obj.id = hold_request_id
        obj.stat = stat
        return obj

    def storeHoldRequest(self, hold_request):
        '''
        Create or update a hold request.

        If this is a new request (the `id` attribute of the passed-in request
        is None), then `id` will be set to the unique request identifier
        after successful creation.

        :param HoldRequest hold_request: Object representing the hold request.
        '''
        if hold_request.id is None:
            path = self.client.create(self.HOLD_REQUEST_ROOT + "/",
                                      value=hold_request.serialize(),
                                      sequence=True,
                                      makepath=True)
            hold_request.id = path.split('/')[-1]
        else:
            path = self.HOLD_REQUEST_ROOT + "/" + hold_request.id
            self.client.set(path, hold_request.serialize())
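
    # Added sketch (hedged; constructing a HoldRequest this way is assumed,
    # not shown in this example): a new request is stored as a ZooKeeper
    # sequence node, so its id comes from the server rather than the caller.
    #
    #     req = zuul.model.HoldRequest()   # req.id is None
    #     zk.storeHoldRequest(req)         # creates HOLD_REQUEST_ROOT + '/<seq>'
    #     assert req.id is not None
    #     zk.storeHoldRequest(req)         # subsequent calls update in place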

    def _markHeldNodesAsUsed(self, hold_request):
        '''
        Changes the state for each held node for the hold request to 'used'.

        :returns: True if all nodes marked USED, False otherwise.
        '''
        def getHeldNodeIDs(request):
            node_ids = []
            for data in request.nodes:
                # TODO(Shrews): Remove type check at some point.
                # When autoholds were initially changed to be stored in ZK,
                # the node IDs were originally stored as a list of strings.
                # A later change embedded them within a dict. Handle both
                # cases here to deal with the upgrade.
                if isinstance(data, dict):
                    node_ids += data['nodes']
                else:
                    node_ids.append(data)
            return node_ids

        failure = False
        for node_id in getHeldNodeIDs(hold_request):
            node = self.getNode(node_id)
            if not node or node['state'] == zuul.model.STATE_USED:
                continue

            node['state'] = zuul.model.STATE_USED

            name = None
            label = None
            if 'name' in node:
                name = node['name']
            if 'label' in node:
                label = node['label']

            node_obj = zuul.model.Node(name, label)
            node_obj.updateFromDict(node)

            try:
                self.lockNode(node_obj, blocking=False)
                self.storeNode(node_obj)
            except Exception:
                self.log.exception(
                    "Cannot change HELD node state to USED "
                    "for node %s in request %s", node_obj.id, hold_request.id)
                failure = True
            finally:
                try:
                    if node_obj.lock:
                        self.unlockNode(node_obj)
                except Exception:
                    self.log.exception(
                        "Failed to unlock HELD node %s for request %s",
                        node_obj.id, hold_request.id)

        return not failure
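
    # Added note: the node lock above is taken non-blocking on purpose; if
    # another component holds it, the state change is skipped and reported as
    # a failure, and deleteHoldRequest() below leaves the request in place.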

    def deleteHoldRequest(self, hold_request):
        '''
        Delete a hold request.

        :param HoldRequest hold_request: Object representing the hold request.
        '''
        if not self._markHeldNodesAsUsed(hold_request):
            self.log.info(
                "Unable to delete hold request %s because "
                "not all nodes marked as USED.", hold_request.id)
            return

        path = self.HOLD_REQUEST_ROOT + "/" + hold_request.id
        try:
            self.client.delete(path, recursive=True)
        except kze.NoNodeError:
            pass

    def lockHoldRequest(self, request, blocking=True, timeout=None):
        '''
        Lock a hold request.

        This will set the `lock` attribute of the request object when the
        lock is successfully acquired.

        :param HoldRequest request: The hold request to lock.
        '''
        if not request.id:
            raise LockException(
                "Hold request without an ID cannot be locked: %s" % request)

        path = "%s/%s/lock" % (self.HOLD_REQUEST_ROOT, request.id)
        try:
            lock = Lock(self.client, path)
            have_lock = lock.acquire(blocking, timeout)
        except kze.LockTimeout:
            raise LockException("Timeout trying to acquire lock %s" % path)

        # If we aren't blocking, it's possible we didn't get the lock
        # because someone else has it.
        if not have_lock:
            raise LockException("Did not get lock on %s" % path)

        request.lock = lock

    def unlockHoldRequest(self, request):
        '''
        Unlock a hold request.

        The request must already have been locked.

        :param HoldRequest request: The request to unlock.

        :raises: LockException if the request is not currently locked.
        '''
        if request.lock is None:
            raise LockException("Request %s does not hold a lock" % request)
        request.lock.release()
        request.lock = None
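
# Added usage sketch (not from the original example): one way the hold-request
# API above might be driven. `zk` (a connected instance of the class above) and
# `request` (a zuul.model.HoldRequest) are assumed to exist; updating `reason`
# is purely illustrative.
def _example_update_hold_request(zk, request):
    zk.lockHoldRequest(request, blocking=True, timeout=30)
    try:
        request.reason = 'held for debugging'
        zk.storeHoldRequest(request)
    finally:
        zk.unlockHoldRequest(request)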
Example #5
0
class KazooTreeCacheTests(KazooTestCase):

    def setUp(self):
        super(KazooTreeCacheTests, self).setUp()
        self._event_queue = self.client.handler.queue_impl()
        self._error_queue = self.client.handler.queue_impl()
        self.path = None
        self.cache = None

    def tearDown(self):
        super(KazooTreeCacheTests, self).tearDown()
        if not self._error_queue.empty():
            try:
                raise self._error_queue.get()
            except FakeException:
                pass

    def make_cache(self):
        if self.cache is None:
            self.path = '/' + uuid.uuid4().hex
            self.cache = TreeCache(self.client, self.path)
            self.cache.listen(lambda event: self._event_queue.put(event))
            self.cache.listen_fault(lambda error: self._error_queue.put(error))
            self.cache.start()
        return self.cache

    def wait_cache(self, expect=None, since=None, timeout=10):
        started = since is None
        while True:
            event = self._event_queue.get(timeout=timeout)
            if started:
                if expect is not None:
                    eq_(event.event_type, expect)
                return event
            if event.event_type == since:
                started = True
                if expect is None:
                    return
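
    # Added note: wait_cache(expect=X) pops the next event and asserts its
    # type is X; wait_cache(since=Y) drains events until one of type Y
    # arrives, then returns, or keeps waiting for `expect` when both are set.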

    def spy_client(self, method_name):
        method = getattr(self.client, method_name)
        return patch.object(self.client, method_name, wraps=method)

    def test_start(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        stat = self.client.exists(self.path)
        eq_(stat.version, 0)

        eq_(self.cache._state, TreeCache.STATE_STARTED)
        eq_(self.cache._root._state, TreeNode.STATE_LIVE)

    @raises(KazooException)
    def test_start_started(self):
        self.make_cache()
        self.cache.start()

    @raises(KazooException)
    def test_start_closed(self):
        self.make_cache()
        self.cache.start()
        self.cache.close()
        self.cache.start()

    def test_close(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', makepath=True)
        for _ in range(3):
            self.wait_cache(TreeEvent.NODE_ADDED)

        self.cache.close()

        # nothing should be published since the tree is closed
        ok_(self._event_queue.empty())

        # tree should be empty
        eq_(self.cache._root._children, {})
        eq_(self.cache._root._data, None)
        eq_(self.cache._state, TreeCache.STATE_CLOSED)

        # node state should not be changed
        assert_not_equal(self.cache._root._state, TreeNode.STATE_DEAD)

    def test_children_operation(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/test_children', b'test_children_1')
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_type, TreeEvent.NODE_ADDED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_1')
        eq_(event.event_data.stat.version, 0)

        self.client.set(self.path + '/test_children', b'test_children_2')
        event = self.wait_cache(TreeEvent.NODE_UPDATED)
        eq_(event.event_type, TreeEvent.NODE_UPDATED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_2')
        eq_(event.event_data.stat.version, 1)

        self.client.delete(self.path + '/test_children')
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        eq_(event.event_type, TreeEvent.NODE_REMOVED)
        eq_(event.event_data.path, self.path + '/test_children')
        eq_(event.event_data.data, b'test_children_2')
        eq_(event.event_data.stat.version, 1)
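
        # Added note: the NODE_REMOVED event is published with the last data
        # and stat the cache held for the node, which is why it still shows
        # b'test_children_2' and version 1 above.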

    def test_subtree_operation(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/foo/bar/baz', makepath=True)
        for relative_path in ('/foo', '/foo/bar', '/foo/bar/baz'):
            event = self.wait_cache(TreeEvent.NODE_ADDED)
            eq_(event.event_type, TreeEvent.NODE_ADDED)
            eq_(event.event_data.path, self.path + relative_path)
            eq_(event.event_data.data, b'')
            eq_(event.event_data.stat.version, 0)

        self.client.delete(self.path + '/foo', recursive=True)
        for relative_path in ('/foo/bar/baz', '/foo/bar', '/foo'):
            event = self.wait_cache(TreeEvent.NODE_REMOVED)
            eq_(event.event_type, TreeEvent.NODE_REMOVED)
            eq_(event.event_data.path, self.path + relative_path)

    def test_get_data(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', b'@', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_data(self.path).data, b'')
            eq_(cache.get_data(self.path).stat.version, 0)

            eq_(cache.get_data(self.path + '/foo').data, b'')
            eq_(cache.get_data(self.path + '/foo').stat.version, 0)

            eq_(cache.get_data(self.path + '/foo/bar').data, b'')
            eq_(cache.get_data(self.path + '/foo/bar').stat.version, 0)

            eq_(cache.get_data(self.path + '/foo/bar/baz').data, b'@')
            eq_(cache.get_data(self.path + '/foo/bar/baz').stat.version, 0)

    def test_get_children(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.client.create(self.path + '/foo/bar/baz', b'@', makepath=True)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)
        self.wait_cache(TreeEvent.NODE_ADDED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_children(self.path + '/foo/bar/baz'), frozenset())
            eq_(cache.get_children(self.path + '/foo/bar'), frozenset(['baz']))
            eq_(cache.get_children(self.path + '/foo'), frozenset(['bar']))
            eq_(cache.get_children(self.path), frozenset(['foo']))

    @raises(ValueError)
    def test_get_data_out_of_tree(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.cache.get_data('/out_of_tree')

    @raises(ValueError)
    def test_get_children_out_of_tree(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)
        self.cache.get_children('/out_of_tree')

    def test_get_data_no_node(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_data(self.path + '/non_exists'), None)

    def test_get_children_no_node(self):
        cache = self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        with patch.object(cache, '_client'):  # disable any remote operation
            eq_(cache.get_children(self.path + '/non_exists'), None)

    def test_session_reconnected(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        self.client.create(self.path + '/foo')
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_data.path, self.path + '/foo')

        with self.spy_client('get_async') as get_data:
            with self.spy_client('get_children_async') as get_children:
                # session suspended
                self.lose_connection(self.client.handler.event_object)
                self.wait_cache(TreeEvent.CONNECTION_SUSPENDED)

                # A series of refresh operations happens here, but NODE_ADDED
                # events will not be raised because each node's zxid is
                # unchanged across the reconnect.

                # connection restored
                self.wait_cache(TreeEvent.CONNECTION_RECONNECTED)

                # wait for outstanding operations
                while self.cache._outstanding_ops > 0:
                    self.client.handler.sleep_func(0.1)

                # inspect in-memory nodes
                _node_root = self.cache._root
                _node_foo = self.cache._root._children['foo']

                # make sure that all nodes are refreshed
                get_data.assert_has_calls([
                    call(self.path, watch=_node_root._process_watch),
                    call(self.path + '/foo', watch=_node_foo._process_watch),
                ], any_order=True)
                get_children.assert_has_calls([
                    call(self.path, watch=_node_root._process_watch),
                    call(self.path + '/foo', watch=_node_foo._process_watch),
                ], any_order=True)
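
                # Added note: the spies confirm that both the data and the
                # child list of every cached node were re-fetched (and their
                # watches re-registered) after the session was restored.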

    def test_root_recreated(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        # remove root node
        self.client.delete(self.path)
        event = self.wait_cache(TreeEvent.NODE_REMOVED)
        eq_(event.event_type, TreeEvent.NODE_REMOVED)
        eq_(event.event_data.data, b'')
        eq_(event.event_data.path, self.path)
        eq_(event.event_data.stat.version, 0)

        # re-create root node
        self.client.ensure_path(self.path)
        event = self.wait_cache(TreeEvent.NODE_ADDED)
        eq_(event.event_type, TreeEvent.NODE_ADDED)
        eq_(event.event_data.data, b'')
        eq_(event.event_data.path, self.path)
        eq_(event.event_data.stat.version, 0)

        self.assertTrue(
            self.cache._outstanding_ops >= 0,
            'unexpected outstanding ops %r' % self.cache._outstanding_ops)

    def test_exception_handler(self):
        error_value = FakeException()
        error_handler = Mock()

        with patch.object(TreeNode, 'on_deleted') as on_deleted:
            on_deleted.side_effect = [error_value]

            self.make_cache()
            self.cache.listen_fault(error_handler)

            self.cache.close()
            error_handler.assert_called_once_with(error_value)

    def test_exception_suppressed(self):
        self.make_cache()
        self.wait_cache(since=TreeEvent.INITIALIZED)

        # provoke a ConnectionClosedError
        self.client.stop()
        self.client.close()
        self.client.handler.start()  # keep the async completion
        self.wait_cache(since=TreeEvent.CONNECTION_LOST)

        with patch.object(TreeNode, 'on_created') as on_created:
            self.cache._root._call_client('exists', '/')
            self.cache._root._call_client('get', '/')
            self.cache._root._call_client('get_children', '/')

            self.wait_cache(since=TreeEvent.INITIALIZED)
            on_created.assert_not_called()
            eq_(self.cache._outstanding_ops, 0)