def test_existing_zk(self):
    """ClusterManager must recover membership from a pre-existing ZK group
    (the scheduler-failover scenario).
    """
    manager = ClusterManager(self.client, "/home/my_cluster")

    instance1 = ServiceInstance(Endpoint("host1", 10000))
    instance2 = ServiceInstance(Endpoint("host2", 10000))
    member1 = manager.add_member(instance1)
    member2 = manager.add_member(instance2)

    # Both members registered as slave znodes holding packed content.
    expectations = (
        ("/home/my_cluster/slaves/member_0000000000", instance1),
        ("/home/my_cluster/slaves/member_0000000001", instance2),
    )
    for path, instance in expectations:
        assert self.storage.paths[path]["data"] == ServiceInstance.pack(instance)

    manager.promote_member(member1)

    # A fresh ClusterManager pointed at the same path recovers the group.
    recovered = ClusterManager(self.client, "/home/my_cluster")
    assert len(recovered._cluster.members) == 2
    assert member1 in recovered._cluster.members
    assert member2 in recovered._cluster.members
    assert recovered._cluster.members[member1] == ServiceInstance.pack(instance1)
def test_callbacks(self):
    """Listeners receive promotion/demotion/master-change/termination callbacks."""
    manager = ClusterManager(self.client, "/home/my_cluster")

    # Two listeners; only the first registers a termination callback.
    instance1 = ServiceInstance(Endpoint("host1", 10000))
    handler1 = CallbackHandler()
    listener1 = ClusterListener(
        self.client,
        "/home/my_cluster",
        instance1,
        handler1.promotion_callback,
        handler1.demotion_callback,
        handler1.master_callback,
        handler1.termination_callback)
    listener1.start()
    member1 = manager.add_member(instance1)

    instance2 = ServiceInstance(Endpoint("host2", 10000))
    handler2 = CallbackHandler()
    listener2 = ClusterListener(
        self.client,
        "/home/my_cluster",
        instance2,
        handler2.promotion_callback,
        handler2.demotion_callback,
        handler2.master_callback)
    listener2.start()
    member2 = manager.add_member(instance2)

    # Promote member1: it sees the promotion, member2 detects the new master.
    manager.promote_member(member1)
    assert handler1.promoted.wait(1)
    assert handler2.detected.get(True, 1) == instance1
    old_master_path = "/home/my_cluster/master/member_0000000000"
    assert self.storage.paths[old_master_path]["data"] == ServiceInstance.pack(instance1)
    slave_path = "/home/my_cluster/slaves/member_0000000001"
    assert self.storage.paths[slave_path]["data"] == ServiceInstance.pack(instance2)

    # Promote member2: member1 is demoted and the old master znode disappears.
    manager.promote_member(member2)
    assert handler1.demoted.wait(1)
    assert handler2.promoted.wait(1)
    new_master_path = "/home/my_cluster/master/member_0000000001"
    assert self.storage.paths[new_master_path]["data"] == ServiceInstance.pack(instance2)
    assert old_master_path not in self.storage.paths

    # Removing the current master demotes it.
    manager.remove_member(member2)
    assert handler2.demoted.wait(1)

    # Deleting the cluster fires the termination callback.
    manager.remove_member(member1)
    manager.delete_cluster()
    assert handler1.terminated.wait(1)
def add_member(self, service_instance):
    """Register 'service_instance' with the ZooKeeper group.

    NOTE:
      - New members join as slaves until they are promoted.
      - If an identical service_instance is already in the group, no new
        znode is created and the existing member ID is returned.

    :return: The member ID for the ServiceInstance generated by ZooKeeper.
    """
    if not isinstance(service_instance, ServiceInstance):
        raise TypeError("'service_instance' should be a ServiceInstance")

    content = ServiceInstance.pack(service_instance)

    # Deduplicate: reuse the existing member ID when the content matches.
    existing = next(
        (member_id for member_id, data in self._cluster.members.items()
         if data == content),
        None)
    if existing is not None:
        log.info("%s not added because it already exists in the group" % service_instance)
        return existing

    znode_path = self._client.create(
        posixpath.join(self._cluster.slaves_group, self._cluster.MEMBER_PREFIX),
        content,
        sequence=True)
    member_id = posixpath.split(znode_path)[1]
    with self._lock:
        self._cluster.members[member_id] = content
    return member_id
def __init__(self, client, cluster_path, self_instance=None,
             promotion_callback=None, demotion_callback=None,
             master_callback=None, termination_callback=None):
    """
    :param client: Kazoo client.
    :param cluster_path: The path for this cluster on ZooKeeper.
    :param self_instance: The local ServiceInstance associated with this listener.
    :param promotion_callback: Invoked when 'self_instance' is promoted.
    :param demotion_callback: Invoked when 'self_instance' is demoted.
    :param master_callback: Invoked when there is a master change otherwise.
    :param termination_callback: Invoked when the cluster is terminated.

    NOTE: Callbacks are executed synchronously in Kazoo's completion thread to
    ensure the delivery order of events. Blocking the callback method means no
    future callbacks will be invoked.
    """
    self._client = client
    self._cluster = Cluster(cluster_path)
    # Pre-pack the local instance once so watch handlers can compare cheaply.
    self._self_content = None
    if self_instance:
        self._self_content = ServiceInstance.pack(self_instance)
    self._master = None
    self._master_content = None
    # Default every callback to a no-op so call sites never need None checks.
    self._promotion_callback = promotion_callback or (lambda: True)
    self._demotion_callback = demotion_callback or (lambda: True)
    self._master_callback = master_callback or (lambda x: True)
    self._termination_callback = termination_callback or (lambda: True)
    # Set when the watcher detects that the master group exists.
    self._children_watch = None
def test_existing_zk(self):
    """A new ClusterManager on the same ZK path recovers existing membership
    (scheduler failover).
    """
    manager = ClusterManager(self.client, "/home/my_cluster")
    host1 = ServiceInstance(Endpoint("host1", 10000))
    id1 = manager.add_member(host1)
    host2 = ServiceInstance(Endpoint("host2", 10000))
    id2 = manager.add_member(host2)

    slave0 = self.storage.paths["/home/my_cluster/slaves/member_0000000000"]
    slave1 = self.storage.paths["/home/my_cluster/slaves/member_0000000001"]
    assert slave0["data"] == ServiceInstance.pack(host1)
    assert slave1["data"] == ServiceInstance.pack(host2)

    manager.promote_member(id1)

    # A second manager built against the same ZK state sees both members.
    manager2 = ClusterManager(self.client, "/home/my_cluster")
    members = manager2._cluster.members
    assert len(members) == 2
    assert id1 in members and id2 in members
    assert members[id1] == ServiceInstance.pack(host1)
def add_member(self, service_instance):
    """Add the member to the ZooKeeper group.

    NOTE:
      - New members are slaves until being promoted.
      - A new member is not added if the specified service_instance already
        exists in the group.

    :return: The member ID for the ServiceInstance generated by ZooKeeper.
    """
    if not isinstance(service_instance, ServiceInstance):
        raise TypeError("'service_instance' should be a ServiceInstance")

    content = ServiceInstance.pack(service_instance)
    for member_id, member_content in self._cluster.members.items():
        if member_content == content:
            # Duplicate registration: keep the existing znode.
            log.info("%s not added because it already exists in the group" % service_instance)
            return member_id

    prefix_path = posixpath.join(self._cluster.slaves_group,
                                 self._cluster.MEMBER_PREFIX)
    znode_path = self._client.create(prefix_path, content, sequence=True)
    member_id = posixpath.basename(znode_path)
    with self._lock:
        self._cluster.members[member_id] = content
    return member_id
def __init__(self, client, cluster_path, self_instance=None,
             promotion_callback=None, demotion_callback=None,
             master_callback=None, termination_callback=None):
    """
    :param client: Kazoo client.
    :param cluster_path: The path for this cluster on ZooKeeper.
    :param self_instance: The local ServiceInstance associated with this listener.
    :param promotion_callback: Invoked when 'self_instance' is promoted.
    :param demotion_callback: Invoked when 'self_instance' is demoted.
    :param master_callback: Invoked when there is a master change otherwise.
    :param termination_callback: Invoked when the cluster is terminated.

    NOTE: Callbacks are executed synchronously in Kazoo's completion thread to
    ensure the delivery order of events. Blocking the callback method means no
    future callbacks will be invoked.
    """
    self._client = client
    self._cluster = Cluster(cluster_path)
    if self_instance:
        self._self_content = ServiceInstance.pack(self_instance)
    else:
        self._self_content = None
    self._master = None
    self._master_content = None
    # Substitute no-op callbacks for any that were not supplied.
    self._promotion_callback = promotion_callback or (lambda: True)
    self._demotion_callback = demotion_callback or (lambda: True)
    self._master_callback = master_callback or (lambda x: True)
    self._termination_callback = termination_callback or (lambda: True)
    # Set when the watcher detects that the master group exists.
    self._children_watch = None
def test_add_member(self):
    """Adding members creates sequential slave znodes; duplicates are no-ops."""
    manager = ClusterManager(self.client, "/home/my_cluster")

    first = ServiceInstance(Endpoint("host1", 10000))
    first_id = manager.add_member(first)
    # Re-adding an identical instance returns the existing member ID.
    assert manager.add_member(first) == first_id

    second = ServiceInstance(Endpoint("host2", 10000))
    manager.add_member(second)

    assert len(manager._cluster.members) == 2
    paths = self.storage.paths
    assert (paths["/home/my_cluster/slaves/member_0000000000"]["data"]
            == ServiceInstance.pack(first))
    assert (paths["/home/my_cluster/slaves/member_0000000001"]["data"]
            == ServiceInstance.pack(second))
def test_promote_member(self):
    """Promotion succeeds once; repeating it for the current master is a no-op."""
    manager = ClusterManager(self.client, "/home/my_cluster")
    instance = ServiceInstance(Endpoint("host", 10000))
    member = manager.add_member(instance)

    assert manager.promote_member(member)
    assert not manager.promote_member(member)  # The 2nd promotion is a no-op.

    master = self.storage.paths["/home/my_cluster/master/member_0000000000"]
    assert master["data"] == ServiceInstance.pack(instance)
def test_service_instance_to_json():
    """Round-trip a ServiceInstance through its JSON representation."""
    json = """{
    "additionalEndpoints": {
        "aurora": {
            "host": "hostname",
            "inet6": "2001:db8:1234:ffff:ffff:ffff:ffff:ffff",
            "port": 22
        },
        "health": {
            "host": "hostname",
            "inet": "1.2.3.4",
            "port": 23
        },
        "http": {
            "host": "hostname",
            "inet": "1.2.3.4",
            "inet6": "2001:db8:1234:ffff:ffff:ffff:ffff:ffff",
            "port": 23
        }
    },
    "serviceEndpoint": {
        "host": "hostname",
        "port": 24
    },
    "shard": 1,
    "status": "ALIVE"
}"""
    additional = {
        "aurora": Endpoint("hostname", 22, "1.2.3.4"),
        "health": Endpoint("hostname", 23, None,
                           "2001:db8:1234:ffff:ffff:ffff:ffff:ffff"),
        "http": Endpoint("hostname", 23, "1.2.3.4",
                         "2001:db8:1234:ffff:ffff:ffff:ffff:ffff"),
    }
    service_instance = ServiceInstance(
        Endpoint("hostname", 24), additional, 'ALIVE', 1)

    # Parsing the literal and re-parsing a freshly packed instance must both
    # reproduce the original object.
    assert ServiceInstance.unpack(json) == service_instance
    packed = ServiceInstance.pack(service_instance)
    assert ServiceInstance.unpack(packed) == service_instance
def test_service_instance_to_json():
    """pack/unpack and a hand-written JSON literal all agree on one instance."""
    json = """{
    "additionalEndpoints": {
        "aurora": {
            "host": "hostname",
            "inet6": "2001:db8:1234:ffff:ffff:ffff:ffff:ffff",
            "port": 22
        },
        "health": {
            "host": "hostname",
            "inet": "1.2.3.4",
            "port": 23
        },
        "http": {
            "host": "hostname",
            "inet": "1.2.3.4",
            "inet6": "2001:db8:1234:ffff:ffff:ffff:ffff:ffff",
            "port": 23
        }
    },
    "serviceEndpoint": {
        "host": "hostname",
        "port": 24
    },
    "shard": 1,
    "status": "ALIVE"
}"""
    service_instance = ServiceInstance(
        Endpoint("hostname", 24),
        {
            "aurora": Endpoint("hostname", 22, "1.2.3.4"),
            "health": Endpoint("hostname", 23, None,
                               "2001:db8:1234:ffff:ffff:ffff:ffff:ffff"),
            "http": Endpoint("hostname", 23, "1.2.3.4",
                             "2001:db8:1234:ffff:ffff:ffff:ffff:ffff"),
        },
        'ALIVE',
        1)

    assert ServiceInstance.unpack(json) == service_instance
    assert ServiceInstance.unpack(
        ServiceInstance.pack(service_instance)) == service_instance
def test_callbacks(self):
    """End-to-end callback delivery: promotion, demotion, master change,
    removal, and cluster termination.
    """
    manager = ClusterManager(self.client, "/home/my_cluster")

    def make_listener(instance, handler, with_termination):
        # Helper: build a listener wired to the given handler's callbacks.
        callbacks = [
            handler.promotion_callback,
            handler.demotion_callback,
            handler.master_callback,
        ]
        if with_termination:
            callbacks.append(handler.termination_callback)
        return ClusterListener(self.client, "/home/my_cluster", instance, *callbacks)

    instance1 = ServiceInstance(Endpoint("host1", 10000))
    handler1 = CallbackHandler()
    listener1 = make_listener(instance1, handler1, True)
    listener1.start()
    member1 = manager.add_member(instance1)

    instance2 = ServiceInstance(Endpoint("host2", 10000))
    handler2 = CallbackHandler()
    listener2 = make_listener(instance2, handler2, False)
    listener2.start()
    member2 = manager.add_member(instance2)

    # Test promotion.
    manager.promote_member(member1)
    assert handler1.promoted.wait(1)
    assert handler2.detected.get(True, 1) == instance1
    assert (self.storage.paths["/home/my_cluster/master/member_0000000000"]["data"]
            == ServiceInstance.pack(instance1))
    assert (self.storage.paths["/home/my_cluster/slaves/member_0000000001"]["data"]
            == ServiceInstance.pack(instance2))

    manager.promote_member(member2)
    assert handler1.demoted.wait(1)
    assert handler2.promoted.wait(1)
    assert (self.storage.paths["/home/my_cluster/master/member_0000000001"]["data"]
            == ServiceInstance.pack(instance2))
    assert "/home/my_cluster/master/member_0000000000" not in self.storage.paths

    manager.remove_member(member2)
    assert handler2.demoted.wait(1)

    # Test removing cluster.
    manager.remove_member(member1)
    manager.delete_cluster()
    assert handler1.terminated.wait(1)
def test_promote_member(self):
    """promote_member returns True on a state change and False when the
    member is already master.
    """
    manager = ClusterManager(self.client, "/home/my_cluster")
    member = manager.add_member(ServiceInstance(Endpoint("host", 10000)))

    first_promotion = manager.promote_member(member)
    second_promotion = manager.promote_member(member)
    assert first_promotion
    assert not second_promotion  # The 2nd promotion is a no-op.

    data = self.storage.paths["/home/my_cluster/master/member_0000000000"]["data"]
    assert data == ServiceInstance.pack(ServiceInstance(Endpoint("host", 10000)))
def test_add_member(self):
    """Members land in the slaves group in znode-sequence order; adding the
    same instance twice does not create a second znode.
    """
    manager = ClusterManager(self.client, "/home/my_cluster")
    instance1 = ServiceInstance(Endpoint("host1", 10000))
    instance2 = ServiceInstance(Endpoint("host2", 10000))

    member1 = manager.add_member(instance1)
    duplicate = manager.add_member(instance1)  # Second insertion is ignored.
    assert duplicate == member1
    manager.add_member(instance2)

    assert len(manager._cluster.members) == 2
    for suffix, instance in (("0000000000", instance1), ("0000000001", instance2)):
        path = "/home/my_cluster/slaves/member_" + suffix
        assert self.storage.paths[path]["data"] == ServiceInstance.pack(instance)