def test_wait_for_initialize(self):
    """
    A connection to a zookeeper that is running, but whose juju state is
    not ready, should wait until that state is ready.

    The provider's connect deferred must not fire until /initialized
    exists in zookeeper.
    """
    client = ZookeeperClient()
    self.client = client  # for poke_zk
    self.mock_connect(False, succeed(client))
    self.mocker.replay()

    # Quiet the zookeeper C client's logging for the test.
    zookeeper.set_debug_level(0)
    yield client.connect(get_test_zookeeper_address())

    provider = DummyProvider(ProviderMachine("i-amok", "foo.example.com"))
    d = provider.connect()
    # Collect whatever the provider's connect eventually fires with.
    client_result = []
    d.addCallback(client_result.append)

    # Give it a chance to do it incorrectly.
    yield self.poke_zk()

    try:
        # Must not have fired yet: /initialized does not exist.
        self.assertEquals(client_result, [])
        yield client.create("/initialized")
        yield d
        self.assertTrue(client_result, client_result)
        # The provider hands back the very client it connected with.
        self.assertIdentical(client_result[0], client)
    finally:
        deleteTree("/", client.handle)
        client.close()
def setUp(self):
    """Capture init logging and prepare a connected client + hierarchy."""
    zookeeper.set_debug_level(0)
    self.log = self.capture_logging("juju.state.init")
    self.client = ZookeeperClient(get_test_zookeeper_address())
    self.identity = make_identity("admin:genie")
    self.layout = StateHierarchy(
        self.client, self.identity, "i-abcdef", "dummy")
    return self.client.connect()
def test_client_session_expiration_event(self):
    """A client which receives a session expiration event.

    Expiring the session (by closing a second client attached to the same
    session id) fires the session callback for the connection and for every
    outstanding watch, and leaves the client unrecoverable.
    """
    session_events = []
    events_received = Deferred()

    def session_event_callback(connection, e):
        # Expecting 8 events total: 4 connecting/expired events for the
        # connection plus one per outstanding watch — TODO confirm the
        # exact breakdown against server behavior.
        session_events.append(e)
        if len(session_events) == 8:
            events_received.callback(True)

    # Connect to a node in the cluster and establish a watch
    self.client = ZookeeperClient(self.cluster[0].address)
    self.client.set_session_callback(session_event_callback)
    yield self.client.connect()

    # Setup some watches to verify they are cleaned out on expiration.
    d, e_watch_d = self.client.exists_and_watch("/")
    yield d
    d, g_watch_d = self.client.get_and_watch("/")
    yield d
    d, c_watch_d = self.client.get_children_and_watch("/")
    yield d

    # Connect a client to the same session on a different node.
    self.client2 = ZookeeperClient(self.cluster[1].address)
    yield self.client2.connect(client_id=self.client.client_id)

    # Close the new client and wait for the event propagation
    yield self.client2.close()

    # It can take some time for this to propagate
    yield events_received
    self.assertEqual(len(session_events), 8)

    # The last four (conn + 3 watches) are all expired
    for evt in session_events[4:]:
        self.assertEqual(evt.state_name, "expired")

    # The connection is dead without reconnecting.
    yield self.assertFailure(
        self.client.exists("/"),
        NotConnectedException, ConnectionException)

    self.assertTrue(self.client.unrecoverable)
    # All three outstanding watch deferreds errback with session expiry.
    yield self.assertFailure(e_watch_d, zookeeper.SessionExpiredException)
    yield self.assertFailure(g_watch_d, zookeeper.SessionExpiredException)
    yield self.assertFailure(c_watch_d, zookeeper.SessionExpiredException)

    # If a reconnect attempt is made with a dead session id
    client_id = self.client.client_id
    self.client.close()  # Free the handle
    yield self.client.connect(client_id=client_id)
    yield self.assertFailure(
        self.client.get_children("/"),
        NotConnectedException, ConnectionException)
def tearDown(self):
    """Wipe the zookeeper tree using a throwaway connection.

    A fresh connection is used so that watches registered by the test's
    own client are not triggered by the cleanup deletions.
    """
    # Close and reopen connection, so that watches set during
    # testing are not affected by the cleaning up.
    self.client.close()
    cleanup_client = ZookeeperClient(get_test_zookeeper_address())
    yield cleanup_client.connect()
    deleteTree(handle=cleanup_client.handle)
    cleanup_client.close()
    yield super(StateTestBase, self).tearDown()
def test_watch_stops_on_closed_connection(self): """Verify watches stops when the connection is closed.""" # Use a separate client connection for watching so it can be # disconnected. watch_client = ZookeeperClient(get_test_zookeeper_address()) yield watch_client.connect() watch_base = StateBase(watch_client) wait_callback = Deferred() finish_callback = Deferred() calls = [] def watcher(old_topology, new_topology): calls.append((old_topology, new_topology)) wait_callback.callback(True) return finish_callback # Start watching. yield watch_base._watch_topology(watcher) # Create the topology. topology = InternalTopology() topology.add_machine("m-0") yield self.set_topology(topology) # Hold off until callback is started. yield wait_callback # Change the topology. topology.add_machine("m-1") yield self.set_topology(topology) # Ensure that the watch has been called just once so far # (although still pending due to the finish_callback). self.assertEquals(len(calls), 1) # Now disconnect the client. watch_client.close() self.assertFalse(watch_client.connected) self.assertTrue(self.client.connected) # Change the topology again. topology.add_machine("m-2") yield self.set_topology(topology) # Allow the first call to be completed, starting a process of # watching for the next change. At this point, the watch will # encounter that the client is disconnected. finish_callback.callback(True) # Give a chance for something bad to happen. yield self.poke_zk() # Ensure the watch was still not called. self.assertEquals(len(calls), 1)
class LayoutTest(TestCase):
    """Verify StateHierarchy.initialize creates the expected zookeeper
    layout, with admin ACLs on every top-level node."""

    def setUp(self):
        self.log = self.capture_logging("juju.state.init")
        zookeeper.set_debug_level(0)
        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.identity = make_identity("admin:genie")
        self.layout = StateHierarchy(
            self.client, self.identity, "i-abcdef", "dummy")
        return self.client.connect()

    def tearDown(self):
        deleteTree(handle=self.client.handle)
        self.client.close()

    @inlineCallbacks
    def assert_existence_and_acl(self, path):
        """Assert `path` exists and grants PERM_ALL to the admin identity."""
        exists = yield self.client.exists(path)
        self.assertTrue(exists)
        acls, stat = yield self.client.get_acl(path)

        found_admin_acl = False
        for acl in acls:
            if acl["id"] == self.identity \
               and acl["perms"] == zookeeper.PERM_ALL:
                found_admin_acl = True
                break
        self.assertTrue(found_admin_acl)

    @inlineCallbacks
    def test_initialize(self):
        yield self.layout.initialize()

        # All top-level containers exist and are admin-controlled.
        yield self.assert_existence_and_acl("/charms")
        yield self.assert_existence_and_acl("/services")
        yield self.assert_existence_and_acl("/units")
        yield self.assert_existence_and_acl("/machines")
        yield self.assert_existence_and_acl("/relations")
        yield self.assert_existence_and_acl("/initialized")

        # Machine 0 is registered with the instance id given to setUp.
        machine_state_manager = MachineStateManager(self.client)
        machine_state = yield machine_state_manager.get_machine_state(0)
        self.assertTrue(machine_state)
        instance_id = yield machine_state.get_instance_id()
        self.assertEqual(instance_id, "i-abcdef")

        # The provider type is recorded in global settings.
        settings_manager = GlobalSettingsStateManager(self.client)
        self.assertEqual((yield settings_manager.get_provider_type()),
                         "dummy")
        self.assertEqual(
            self.log.getvalue().strip(),
            "Initializing zookeeper hierarchy")
def open_client(self, credentials=None):
    """
    Open a zookeeper client, optionally authenticating with the
    credentials if given. The client is tracked for later cleanup.
    """
    client = ZookeeperClient("127.0.0.1:2181")
    self.clients.append(client)
    yield client.connect()

    if credentials:
        auth_d = client.add_auth("digest", credentials)
        # hack to keep auth fast: issuing a read forces the auth
        # exchange to complete promptly.
        yield client.exists("/")
        yield auth_d

    returnValue(client)
def test_client_reconnect_session_on_different_server(self):
    """On connection failure, An application can choose to use a new
    connection with which to reconnect to a different member of the
    zookeeper cluster, reacquiring the extant session.

    A large obvious caveat to using a new client instance rather than
    reconnecting the existing client, is that even though the session has
    outstanding watches, the watch callbacks/deferreds won't be active
    unless the client instance used to create them is connected.
    """
    session_events = []

    def session_event_callback(connection, e):
        session_events.append(e)

    # Connect to a node in the cluster and establish a watch
    self.client = ZookeeperClient(self.cluster[2].address,
                                  session_timeout=5000)
    self.client.set_session_callback(session_event_callback)
    yield self.client.connect()

    # Ephemeral node: lives exactly as long as the session does.
    yield self.client.create("/hello", flags=zookeeper.EPHEMERAL)
    self.assertTrue((yield self.client.exists("/hello")))

    # Shutdown the server the client is connected to
    self.cluster[2].stop()
    yield self.sleep(0.1)

    # Verify we got a session event regarding the down server
    self.assertTrue(session_events)

    # Open up a new connection to a different server with same session
    self.client2 = ZookeeperClient(self.cluster[0].address)
    yield self.client2.connect(client_id=self.client.client_id)

    # Close the old disconnected client
    self.client.close()

    # Verify the ephemeral still exists — the session survived the
    # server failure via the second client.
    exists = yield self.client2.exists("/hello")
    self.assertTrue(exists)

    # Destroy the session and reconnect
    self.client2.close()
    yield self.client.connect(self.cluster[0].address)

    # Ephemeral is destroyed when the session closed.
    exists = yield self.client.exists("/hello")
    self.assertFalse(exists)
class LayoutTest(TestCase):
    """Verify StateHierarchy.initialize creates the expected zookeeper
    layout, with admin ACLs on every top-level node."""

    def setUp(self):
        self.log = self.capture_logging("juju.state.init")
        zookeeper.set_debug_level(0)
        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.identity = make_identity("admin:genie")
        self.layout = StateHierarchy(
            self.client, self.identity, "i-abcdef", "dummy")
        return self.client.connect()

    def tearDown(self):
        deleteTree(handle=self.client.handle)
        self.client.close()

    @inlineCallbacks
    def assert_existence_and_acl(self, path):
        """Assert `path` exists and grants PERM_ALL to the admin identity."""
        exists = yield self.client.exists(path)
        self.assertTrue(exists)
        acls, stat = yield self.client.get_acl(path)

        found_admin_acl = False
        for acl in acls:
            if acl["id"] == self.identity \
               and acl["perms"] == zookeeper.PERM_ALL:
                found_admin_acl = True
                break
        self.assertTrue(found_admin_acl)

    @inlineCallbacks
    def test_initialize(self):
        yield self.layout.initialize()

        # All top-level containers exist and are admin-controlled.
        yield self.assert_existence_and_acl("/charms")
        yield self.assert_existence_and_acl("/services")
        yield self.assert_existence_and_acl("/units")
        yield self.assert_existence_and_acl("/machines")
        yield self.assert_existence_and_acl("/relations")
        yield self.assert_existence_and_acl("/initialized")

        # Machine 0 is registered with the instance id given to setUp.
        machine_state_manager = MachineStateManager(self.client)
        machine_state = yield machine_state_manager.get_machine_state(0)
        self.assertTrue(machine_state)
        instance_id = yield machine_state.get_instance_id()
        self.assertEqual(instance_id, "i-abcdef")

        # The provider type is recorded in global settings.
        settings_manager = GlobalSettingsStateManager(self.client)
        self.assertEqual((yield settings_manager.get_provider_type()),
                         "dummy")
        self.assertEqual(
            self.log.getvalue().strip(),
            "Initializing zookeeper hierarchy")
def test_bad_version_error(self):
    """
    The node captures the node version on any read operations, which it
    utilizes for write operations. On a concurrent modification error
    the node return a bad version error, this also clears the cached
    state so subsequent modifications will be against the latest
    version, unless the cache is seeded again by a read operation.
    """
    node = ZNode("/zoo/lion", self.client)
    # Second, independent connection used to modify the node behind
    # the first node object's back.
    self.client2 = ZookeeperClient("127.0.0.1:2181")
    yield self.client2.connect()

    yield self.client.create("/zoo/lion", "mouse")
    # Seed the node's cached version (version 0 at this point).
    yield node.get_data()
    # Concurrent write bumps the server-side version to 1.
    yield self.client2.set("/zoo/lion", "den2")
    data = yield self.client.exists("/zoo/lion")
    self.assertEqual(data['version'], 1)

    # Write with the stale cached version fails.
    d = node.set_data("zebra")
    self.failUnlessFailure(d, zookeeper.BadVersionException)
    yield d

    # after failure the cache is deleted, and a set proceeds
    yield node.set_data("zebra")
    data = yield node.get_data()
    self.assertEqual(data, "zebra")
def setUp(self):
    """Set up a proxied client (for failure injection) and a direct one.

    Only the direct client is connected here; the proxied client collects
    session events once tests connect it through the TCP proxy.
    """
    super(WatchDeliveryConnectionFailedTest, self).setUp()
    self.proxy = ProxyFactory("127.0.0.1", 2181)
    self.proxy_port = reactor.listenTCP(0, self.proxy)
    listen_host = self.proxy_port.getHost()
    proxy_address = "%s:%s" % (listen_host.host, listen_host.port)
    self.proxied_client = ZookeeperClient(proxy_address)
    self.direct_client = ZookeeperClient("127.0.0.1:2181", 3000)
    self.session_events = []

    def session_event_collector(conn, event):
        self.session_events.append(event)

    self.proxied_client.set_session_callback(session_event_collector)
    return self.direct_client.connect()
def setUp(self):
    """Capture init logging and prepare a connected client + hierarchy."""
    zookeeper.set_debug_level(0)
    self.log = self.capture_logging("juju.state.init")
    self.client = ZookeeperClient(get_test_zookeeper_address())
    self.identity = make_identity("admin:genie")
    self.layout = StateHierarchy(
        self.client, self.identity, "i-abcdef", "dummy")
    return self.client.connect()
def connect(self, share=False):
    """Connect to juju's zookeeper.

    Loads the saved provider state for the zookeeper address, waits for
    the juju state hierarchy to be initialized, and returns the
    connected client. Raises EnvironmentNotFound when no state exists.
    """
    saved_state = yield self.load_state()
    if not saved_state:
        raise EnvironmentNotFound()

    address = saved_state["zookeeper-address"]
    client = yield ZookeeperClient(address).connect()
    yield ZookeeperConnect(self).wait_for_initialization(client)
    returnValue(client)
def setUp(self):
    """Prepare a sample charm, storage, and a connected publisher."""
    super(CharmPublisherTest, self).setUp()
    zookeeper.set_debug_level(0)

    self.charm = CharmDirectory(self.sample_dir1)
    self.charm_id = local_charm_id(self.charm)
    self.charm_key = under.quote(self.charm_id)
    # provider storage key: id plus content hash
    charm_sha = self.charm.get_sha256()
    self.charm_storage_key = under.quote(
        "%s:%s" % (self.charm_id, charm_sha))

    self.client = ZookeeperClient(get_test_zookeeper_address())
    self.storage_dir = self.makeDir()
    self.storage = FileStorage(self.storage_dir)
    self.publisher = CharmPublisher(self.client, self.storage)

    yield self.client.connect()
    yield self.client.create("/charms")
def test_watch_stops_on_early_closed_connection(self): """Verify watches stops when the connection is closed early. _watch_topology chains from an exists_and_watch to a get_and_watch. This test ensures that this chaining will fail gracefully if the connection is closed before this chaining can occur. """ # Use a separate client connection for watching so it can be # disconnected. watch_client = ZookeeperClient(get_test_zookeeper_address()) yield watch_client.connect() watch_base = StateBase(watch_client) calls = [] @inlineCallbacks def watcher(old_topology, new_topology): calls.append((old_topology, new_topology)) # Create the topology. topology = InternalTopology() topology.add_machine("m-0") yield self.set_topology(topology) # Now disconnect the client. watch_client.close() self.assertFalse(watch_client.connected) self.assertTrue(self.client.connected) # Start watching. yield watch_base._watch_topology(watcher) # Change the topology, this will trigger the watch. topology.add_machine("m-1") yield self.set_topology(topology) # Give a chance for something bad to happen. yield self.poke_zk() # Ensure the watcher was never called, because its client was # disconnected. self.assertEquals(len(calls), 0)
def connect(self, share=False):
    """Connect to the zookeeper juju running in the machine provider.

    @param share: Requests sharing of the connection with other clients
        attempting to connect to the same provider, if that's feasible.
        Unused for the dummy provider.
    """
    address = os.environ.get("ZOOKEEPER_ADDRESS", "127.0.0.1:2181")
    client = ZookeeperClient(address, session_timeout=1000)
    return client.connect()
def setUp(self):
    """Connect a client and create the /zoo base node for the tests."""
    super(NodeTest, self).setUp()
    zookeeper.set_debug_level(zookeeper.LOG_LEVEL_ERROR)
    self.client = ZookeeperClient("127.0.0.1:2181", 2000)
    self.client2 = None
    connect_d = self.client.connect()

    def create_zoo(client):
        client.create("/zoo")

    connect_d.addCallback(create_zoo)
    return connect_d
def test_managed_zookeeper(self): zookeeper.set_debug_level(0) # Start zookeeper data_dir = self.makeDir() instance = Zookeeper(data_dir, 12345) yield instance.start() self.assertTrue(instance.running) # Connect a client client = ZookeeperClient("127.0.1.1:12345") yield client.connect() stat = yield client.exists("/") yield client.close() self.assertTrue(stat) # Stop Instance yield instance.stop() self.assertFalse(instance.running) self.assertFalse(os.path.exists(data_dir))
def test_fast_connection(self):
    """Verify connection when requirements are available at time of conn.

    This includes /initialized is already set. In addition, also verifies
    that if multiple ZKs are available, one is selected via random.choice.
    """
    log = self.capture_logging(level=logging.DEBUG)
    client = ZookeeperClient()
    self.mock_connect(False, succeed(client))

    def get_choice(lst):
        # Deterministic stand-in for random.choice: always pick the
        # machine with the expected dns name.
        for item in lst:
            if item.dns_name == "foo.example.com":
                return item
        raise AssertionError("expected choice not seen")

    self.patch(random, "choice", get_choice)
    self.mocker.replay()

    provider = DummyProvider(
        ProviderMachine("i-am-picked", "foo.example.com"),
        ProviderMachine("i-was-not", "bar.example.com"))

    zookeeper.set_debug_level(0)
    yield client.connect(get_test_zookeeper_address())
    try:
        # /initialized pre-exists, so connect should complete without
        # waiting.
        yield client.create("/initialized")
        yield provider.connect()
        self.assertEqual(
            "Connecting to environment...\n"
            "Connecting to environment using foo.example.com...\n"
            "Environment is initialized.\n"
            "Connected to environment.\n",
            log.getvalue())
    finally:
        deleteTree("/", client.handle)
        client.close()
def connect(self):
    """Return an authenticated connection to the juju zookeeper.

    Reads the server list and digest principals from self.config, adds
    each principal's credentials, and returns the connected client.
    """
    hosts = self.config["zookeeper_servers"]
    self.client = yield ZookeeperClient().connect(hosts)

    principals = self.config.get("principals", ())
    for principal in principals:
        # Deliberately not yielded — see the read below, which forces
        # the auth exchanges to complete.
        self.client.add_auth("digest", principal)

    # bug work around to keep auth fast
    if principals:
        yield self.client.exists("/")

    returnValue(self.client)
def test_client_watch_migration(self):
    """On server rotation, extant watches are still active.

    A client connected to multiple servers, will transparently migrate
    amongst them, as individual servers can no longer be reached. Watch
    deferreds issued from the same client instance will continue to
    function as the session is maintained.
    """
    session_events = []

    def session_event_callback(connection, e):
        session_events.append(e)

    # Connect to the Zookeeper Cluster
    servers = ",".join([s.address for s in self.cluster])
    self.client = ZookeeperClient(servers)
    self.client.set_session_callback(session_event_callback)
    yield self.client.connect()

    # Setup a watch
    yield self.client.create("/hello")
    exists_d, watch_d = self.client.exists_and_watch("/hello")
    yield exists_d

    # Shutdown the server the client is connected to
    self.cluster[0].stop()

    # Wait for the shutdown and cycle, if we don't wait we'll
    # get occasionally get a zookeeper connectionloss exception.
    yield self.sleep(0.1)

    # The session events that would have been ignored are sent
    # to the session event callback.
    self.assertTrue(session_events)
    self.assertTrue(self.client.connected)

    # If we delete the node, we'll see the watch fire — proving the
    # watch survived the server migration.
    yield self.client.delete("/hello")
    event = yield watch_d
    self.assertEqual(event.type_name, "deleted")
    self.assertEqual(event.path, "/hello")
def test_connection_error_handler(self):
    """A callback can be specified for connection errors.

    We can specify a callback for connection errors, that can perform
    recovery for a disconnected client, restablishing
    """

    @inlineCallbacks
    def connection_error_handler(connection, error):
        # Moved management of this connection attribute out of the
        # default behavior for a connection exception, to support
        # the retry facade. Under the hood libzk is going to be
        # trying to transparently reconnect
        connection.connected = False

        # On loss of the connection, reconnect the client w/ same session.
        yield connection.connect(
            self.cluster[1].address, client_id=connection.client_id)
        # Arbitrary marker value, asserted on below to prove the
        # handler's result is surfaced through the failed API call.
        returnValue(23)

    self.client = ZookeeperClient(self.cluster[0].address)
    self.client.set_connection_error_callback(connection_error_handler)
    yield self.client.connect()

    yield self.client.create("/hello")
    exists_d, watch_d = self.client.exists_and_watch("/hello")
    yield exists_d

    # Shutdown the server the client is connected to
    self.cluster[0].stop()
    yield self.sleep(0.1)

    # Results in connection loss exception, and invoking of error handler.
    result = yield self.client.exists("/hello")

    # The result of the error handler is returned to the api
    self.assertEqual(result, 23)

    # After recovery the client is usable again.
    exists = yield self.client.exists("/hello")
    self.assertTrue(exists)
class RemoveTreeTest(TestCase):
    """Tests for the recursive remove_tree utility."""

    @inlineCallbacks
    def setUp(self):
        yield super(RemoveTreeTest, self).setUp()
        zookeeper.set_debug_level(0)
        self.client = ZookeeperClient(get_test_zookeeper_address())
        yield self.client.connect()

    @inlineCallbacks
    def test_remove_tree(self):
        # Build a small nested tree, then remove its root recursively.
        tree_paths = [
            "/zoo",
            "/zoo/mammals",
            "/zoo/mammals/elephant",
            "/zoo/reptiles",
            "/zoo/reptiles/snake",
        ]
        for node_path in tree_paths:
            yield self.client.create(node_path)

        yield remove_tree(self.client, "/zoo")

        remaining = yield self.client.get_children("/")
        self.assertNotIn("zoo", remaining)
def test_client_session_migration(self): """A client will automatically rotate servers to ensure a connection. A client connected to multiple servers, will transparently migrate amongst them, as individual servers can no longer be reached. A client's session will be maintined. """ # Connect to the Zookeeper Cluster servers = ",".join([s.address for s in self.cluster]) self.client = ZookeeperClient(servers) yield self.client.connect() yield self.client.create("/hello", flags=zookeeper.EPHEMERAL) # Shutdown the server the client is connected to self.cluster[0].stop() # Wait for the shutdown and cycle, if we don't wait we'll # get a zookeeper connectionloss exception on occassion. yield self.sleep(0.1) self.assertTrue(self.client.connected) exists = yield self.client.exists("/hello") self.assertTrue(exists)
def setUp(self):
    """Connect a fresh zookeeper client for each test."""
    yield super(RemoveTreeTest, self).setUp()
    zookeeper.set_debug_level(0)
    test_client = ZookeeperClient(get_test_zookeeper_address())
    self.client = test_client
    yield test_client.connect()
from binascii import crc32
from functools import wraps
from itertools import count
import json
import struct
import sys

from twisted.logger import Logger, globalLogBeginner, textFileLogObserver
from twisted.internet.defer import Deferred, inlineCallbacks, maybeDeferred
from twisted.internet.task import react
from twisted.internet.protocol import Protocol, Factory
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol

from txzookeeper import ZookeeperClient

# NOTE(review): module-level client construction is an import-time side
# effect; consider moving it into the entry point. Left in place here as
# other code may reference the module-level `zk` name.
zk = ZookeeperClient(servers='localhost:2181')

from parsley import makeProtocol
from characteristic import attributes, Attribute

log = Logger()


def encodeString(string):
    """Encode *string* with a 16-bit big-endian length prefix.

    Only accepts bytes; the assert guards against str/bytes confusion.
    """
    assert isinstance(string, bytes)
    return struct.pack('>h', len(string)) + string


def encodeBytes(string):
    """Encode *string* with a 32-bit big-endian length prefix."""
    assert isinstance(string, bytes)
    return struct.pack('>i', len(string)) + string
class CharmPublisherTest(RepositoryTestBase):
    """Tests for CharmPublisher: storing charm bundles and recording
    their state in zookeeper."""

    @inlineCallbacks
    def setUp(self):
        super(CharmPublisherTest, self).setUp()
        zookeeper.set_debug_level(0)

        self.charm = CharmDirectory(self.sample_dir1)
        self.charm_id = local_charm_id(self.charm)
        self.charm_key = under.quote(self.charm_id)
        # provider storage key
        self.charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, self.charm.get_sha256()))

        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.storage_dir = self.makeDir()
        self.storage = FileStorage(self.storage_dir)
        self.publisher = CharmPublisher(self.client, self.storage)

        yield self.client.connect()
        yield self.client.create("/charms")

    def tearDown(self):
        deleteTree("/", self.client.handle)
        self.client.close()
        super(CharmPublisherTest, self).tearDown()

    @inlineCallbacks
    def test_add_charm_and_publish(self):
        open_file_count = _count_open_files()
        yield self.publisher.add_charm(self.charm_id, self.charm)
        result = yield self.publisher.publish()
        # Publishing must not leak file handles.
        self.assertEquals(_count_open_files(), open_file_count)

        children = yield self.client.get_children("/charms")
        self.assertEqual(children, [self.charm_key])
        fh = yield self.storage.get(self.charm_storage_key)
        bundle = CharmBundle(fh)
        self.assertEqual(self.charm.get_sha256(), bundle.get_sha256())

        self.assertEqual(
            result[0].bundle_url, "file://%s/%s" % (
                self.storage_dir, self.charm_storage_key))

    @inlineCallbacks
    def test_published_charm_sans_unicode(self):
        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.publisher.publish()
        data, stat = yield self.client.get("/charms/%s" % self.charm_key)
        # The serialized state must not contain python unicode markers.
        self.assertNotIn("unicode", data)

    @inlineCallbacks
    def test_add_charm_with_concurrent(self):
        """
        Publishing a charm that has become published concurrently, after
        the add_charm, works fine. It will write to storage regardless.
        The use of a sha256 as part of the storage key is utilized to
        help ensure uniqueness of bits. The sha256 is also stored with
        the charm state.

        This relation between the charm state and the binary bits helps
        guarantee the property that any published charm in zookeeper
        will use the binary bits that it was published with.
        """
        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(
            self.client, self.storage)

        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        # First publisher wins the race.
        yield self.publisher.publish()

        # modify the charm to create a conflict scenario
        self.makeFile("zebra",
                      path=os.path.join(self.sample_dir1, "junk.txt"))

        # assert the charm now has a different sha post modification
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(
            modified_charm_sha, self.charm.get_sha256())

        # verify publishing raises a stateerror
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised state error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # verify the zk state
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get(
            "/charms/%s" % charm_nodes[0])

        # assert the checksum matches the initially published checksum
        self.assertEqual(
            yaml.load(content)["sha256"], self.charm.get_sha256())

        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # and the binary bits where stored
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(
            self.storage_dir, modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))

    @inlineCallbacks
    def test_add_charm_with_concurrent_removal(self):
        """
        If a charm is published, and it detects that the charm already
        exists, it will attempt to retrieve the charm state to verify
        there is no checksum mismatch. If concurrently the charm is
        removed, the publisher should fail with a statechange error.
        """
        manager = self.mocker.patch(CharmStateManager)
        manager.get_charm_state(self.charm_id)
        self.mocker.passthrough()

        def match_charm_bundle(bundle):
            return isinstance(bundle, CharmBundle)

        def match_charm_url(url):
            return url.startswith("file://")

        # First add attempt hits "already exists" ...
        manager.add_charm_state(
            self.charm_id, MATCH(match_charm_bundle), MATCH(match_charm_url))
        self.mocker.result(fail(zookeeper.NodeExistsException()))

        # ... then the follow-up read finds the node gone.
        manager.get_charm_state(self.charm_id)
        self.mocker.result(fail(zookeeper.NoNodeException()))
        self.mocker.replay()

        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.failUnlessFailure(self.publisher.publish(), StateChanged)

    @inlineCallbacks
    def test_add_charm_already_known(self):
        """Adding an existing charm, is an effective noop, as its not
        added to the internal publisher queue.
        """
        # Do an initial publishing of the charm
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        # A second publisher declines to schedule the same charm twice.
        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)
def setUp(self):
    """Connect a client and record the base path used by the tests."""
    zookeeper.set_debug_level(0)
    self.path = "/zoo"
    self.client = ZookeeperClient(get_test_zookeeper_address())
    yield self.client.connect()
class RetryChangeTest(ZookeeperTestCase):
    """Tests for the retry_change read/change_func/set helper."""

    def update_function_increment(self, content, stat):
        # Shared change function: empty content counts as zero.
        if not content:
            return str(0)
        return str(int(content) + 1)

    def setUp(self):
        super(RetryChangeTest, self).setUp()
        self.client = ZookeeperClient("127.0.0.1:2181")
        return self.client.connect()

    def tearDown(self):
        utils.deleteTree("/", self.client.handle)
        self.client.close()

    @inlineCallbacks
    def test_node_create(self):
        """
        retry_change will create a node if one does not exist.
        """
        # use a mock to ensure the change function is only invoked once
        func = self.mocker.mock()
        func(None, None)
        self.mocker.result("hello")
        self.mocker.replay()

        yield retry_change(
            self.client, "/magic-beans", func)

        content, stat = yield self.client.get("/magic-beans")
        self.assertEqual(content, "hello")
        self.assertEqual(stat["version"], 0)

    @inlineCallbacks
    def test_node_update(self):
        """
        retry_change will update an existing node.
        """
        # use a mock to ensure the change function is only invoked once
        func = self.mocker.mock()
        func("", MATCH_STAT)
        self.mocker.result("hello")
        self.mocker.replay()

        yield self.client.create("/magic-beans")
        yield retry_change(
            self.client, "/magic-beans", func)

        content, stat = yield self.client.get("/magic-beans")
        self.assertEqual(content, "hello")
        self.assertEqual(stat["version"], 1)

    def test_error_in_change_function_propogates(self):
        """
        An error in the change function propagates to the caller.
        """
        def error_function(content, stat):
            raise SyntaxError()

        d = retry_change(self.client, "/magic-beans", error_function)
        self.failUnlessFailure(d, SyntaxError)
        return d

    @inlineCallbacks
    def test_concurrent_update_bad_version(self):
        """
        If the node is updated after the retry function has read
        the node but before the content is set, the retry function
        will perform another read/change_func/set cycle.
        """
        yield self.client.create("/animals")
        content, stat = yield self.client.get("/animals")
        yield self.client.set("/animals", "5")

        real_get = self.client.get
        p_client = self.mocker.proxy(self.client)
        # First read hands back the stale pre-"5" content and stat,
        # forcing a BadVersion on set; the second read is the real one.
        p_client.get("/animals")
        self.mocker.result(succeed((content, stat)))
        p_client.get("/animals")
        self.mocker.call(real_get)
        self.mocker.replay()

        yield retry_change(
            p_client, "/animals", self.update_function_increment)

        content, stat = yield real_get("/animals")
        self.assertEqual(content, "6")
        self.assertEqual(stat["version"], 2)

    @inlineCallbacks
    def test_create_node_exists(self):
        """
        If the node is created after the retry function has determined
        the node doesn't exist but before the node is created by the
        retry function, the retry function will perform another
        read/change_func/set cycle.
        """
        yield self.client.create("/animals", "5")

        real_get = self.client.get
        p_client = self.mocker.patch(self.client)
        # First read claims the node is missing, so retry_change tries
        # to create it and hits NodeExists; it then re-reads for real.
        p_client.get("/animals")
        self.mocker.result(fail(zookeeper.NoNodeException()))
        p_client.get("/animals")
        self.mocker.call(real_get)
        self.mocker.replay()

        yield retry_change(
            p_client, "/animals", self.update_function_increment)

        content, stat = yield real_get("/animals")
        self.assertEqual(content, "6")
        self.assertEqual(stat["version"], 1)

    @inlineCallbacks
    def test_set_node_does_not_exist(self):
        """
        If the retry function goes to update a node which has been
        deleted since it was read, it will cycle through to another
        read/change_func/set cycle.
        """
        # FIX: this test was a plain generator (missing @inlineCallbacks)
        # so trial never executed its body; mocker.result() was passed
        # two arguments instead of one deferred, and replay() was never
        # called.
        real_get = self.client.get
        p_client = self.mocker.patch(self.client)
        # First read reports content for a node that no longer exists,
        # so the subsequent set fails with NoNode and retry_change
        # cycles: the real read raises NoNode and the node is created
        # fresh with "0".
        p_client.get("/animals")
        self.mocker.result(succeed(("5", {"version": 1})))
        p_client.get("/animals")
        self.mocker.call(real_get)
        self.mocker.replay()

        yield retry_change(
            p_client, "/animals", self.update_function_increment)

        content, stat = yield real_get("/animals")
        self.assertEqual(content, "0")
        self.assertEqual(stat["version"], 0)

    @inlineCallbacks
    def test_identical_content_noop(self):
        """
        If the change function generates identical content to the
        existing node, the retry change function exits without
        modifying the node.
        """
        # FIX: this test was a plain generator (missing @inlineCallbacks)
        # and its deferred-returning create/get calls were not yielded,
        # so the assertions compared against unfired Deferreds.
        yield self.client.create("/animals", "hello")

        def update(content, stat):
            return content

        yield retry_change(self.client, "/animals", update)
        content, stat = yield self.client.get("/animals")
        self.assertEqual(content, "hello")
        # Version unchanged: no write was issued.
        self.assertEqual(stat["version"], 0)
def setUp(self):
    """Connect a client to the local zookeeper for each test."""
    super(RetryChangeTest, self).setUp()
    client = ZookeeperClient("127.0.0.1:2181")
    self.client = client
    return client.connect()
class NodeTest(TestCase):
    """Integration tests for the ZNode convenience wrapper.

    Each test runs against a live zookeeper server on 127.0.0.1:2181 with
    a fresh ``/zoo`` parent node created in setUp and torn down afterwards.
    """

    def setUp(self):
        super(NodeTest, self).setUp()
        zookeeper.set_debug_level(zookeeper.LOG_LEVEL_ERROR)
        self.client = ZookeeperClient("127.0.0.1:2181", 2000)
        d = self.client.connect()
        self.client2 = None

        def create_zoo(client):
            # BUGFIX: the create deferred must be returned so it chains
            # into setUp's deferred; previously setUp could complete
            # before /zoo existed, racing every test in this class.
            return client.create("/zoo")

        d.addCallback(create_zoo)
        return d

    def tearDown(self):
        super(NodeTest, self).tearDown()
        deleteTree(handle=self.client.handle)
        if self.client.connected:
            self.client.close()
        if self.client2 and self.client2.connected:
            self.client2.close()
        zookeeper.set_debug_level(zookeeper.LOG_LEVEL_DEBUG)

    def _make_digest_identity(self, credentials):
        # Build a "user:base64(sha1)" digest id as zookeeper's digest
        # auth scheme expects.
        user, password = credentials.split(":")
        digest = hashlib.new("sha1", credentials).digest()
        return "%s:%s" % (user, base64.b64encode(digest))

    def test_node_name_and_path(self):
        """
        Each node has name and path.
        """
        node = ZNode("/zoo/rabbit", self.client)
        self.assertEqual(node.name, "rabbit")
        self.assertEqual(node.path, "/zoo/rabbit")

    def test_node_event_repr(self):
        """
        Node events have a human-readable representation.
        """
        node = ZNode("/zoo", self.client)
        event = NodeEvent(4, None, node)
        self.assertEqual(repr(event), "<NodeEvent child at '/zoo'>")

    @inlineCallbacks
    def test_node_exists_nonexistant(self):
        """
        A node knows whether it exists or not.
        """
        node = ZNode("/zoo/rabbit", self.client)
        exists = yield node.exists()
        self.assertFalse(exists)

    @inlineCallbacks
    def test_node_set_data_on_nonexistant(self):
        """
        Setting data on a non existant node raises a no node exception.
        """
        node = ZNode("/zoo/rabbit", self.client)
        d = node.set_data("big furry ears")
        self.failUnlessFailure(d, zookeeper.NoNodeException)
        yield d

    @inlineCallbacks
    def test_node_create_set_data(self):
        """
        A node can be created and have its data set.
        """
        node = ZNode("/zoo/rabbit", self.client)
        data = "big furry ears"
        yield node.create(data)
        exists = yield self.client.exists("/zoo/rabbit")
        self.assertTrue(exists)
        node_data = yield node.get_data()
        self.assertEqual(data, node_data)
        data = data * 2
        yield node.set_data(data)
        node_data = yield node.get_data()
        self.assertEqual(data, node_data)

    @inlineCallbacks
    def test_node_get_data(self):
        """
        Data can be fetched from a node.
        """
        yield self.client.create("/zoo/giraffe", "mouse")
        data = yield ZNode("/zoo/giraffe", self.client).get_data()
        self.assertEqual(data, "mouse")

    @inlineCallbacks
    def test_node_get_data_nonexistant(self):
        """
        Attempting to fetch data from a nonexistant node returns a non
        existant error.
        """
        d = ZNode("/zoo/giraffe", self.client).get_data()
        self.failUnlessFailure(d, zookeeper.NoNodeException)
        yield d

    @inlineCallbacks
    def test_node_get_acl(self):
        """
        The ACL for a node can be retrieved.
        """
        yield self.client.create("/zoo/giraffe")
        acl = yield ZNode("/zoo/giraffe", self.client).get_acl()
        self.assertEqual(len(acl), 1)
        self.assertEqual(acl[0]['scheme'], 'world')

    def test_node_get_acl_nonexistant(self):
        """
        The fetching the ACL for a non-existant node results in an error.
        """
        node = ZNode("/zoo/giraffe", self.client)

        def assert_failed(failed):
            if not isinstance(failed, Failure):
                self.fail("Should have failed")
            self.assertTrue(
                isinstance(failed.value, zookeeper.NoNodeException))

        d = node.get_acl()
        d.addBoth(assert_failed)
        return d

    @inlineCallbacks
    def test_node_set_acl(self):
        """
        The ACL for a node can be modified.
        """
        path = yield self.client.create("/zoo/giraffe")
        credentials = "zebra:moon"
        acl = [{"id": self._make_digest_identity(credentials),
                "scheme": "digest",
                "perms": zookeeper.PERM_ALL}]
        node = ZNode(path, self.client)
        # little hack around slow auth issue 770 zookeeper
        d = self.client.add_auth("digest", credentials)
        yield node.set_acl(acl)
        yield d
        node_acl, stat = yield self.client.get_acl(path)
        self.assertEqual(node_acl, acl)

    @inlineCallbacks
    def test_node_set_data_update_with_cached_exists(self):
        """
        Data can be set on an existing node, updating it in place.
        """
        node = ZNode("/zoo/monkey", self.client)
        yield self.client.create("/zoo/monkey", "stripes")
        exists = yield node.exists()
        self.assertTrue(exists)
        yield node.set_data("banana")
        data, stat = yield self.client.get("/zoo/monkey")
        self.assertEqual(data, "banana")

    @inlineCallbacks
    def test_node_set_data_update_with_invalid_cached_exists(self):
        """
        If a node is deleted, attempting to set data on it raises
        a no node exception.
        """
        node = ZNode("/zoo/monkey", self.client)
        yield self.client.create("/zoo/monkey", "stripes")
        exists = yield node.exists()
        self.assertTrue(exists)
        yield self.client.delete("/zoo/monkey")
        d = node.set_data("banana")
        self.failUnlessFailure(d, zookeeper.NoNodeException)
        yield d

    @inlineCallbacks
    def test_node_set_data_update_with_exists(self):
        """
        Data can be set on an existing node, updating it in place.
        """
        node = ZNode("/zoo/monkey", self.client)
        yield self.client.create("/zoo/monkey", "stripes")
        yield node.set_data("banana")
        data, stat = yield self.client.get("/zoo/monkey")
        self.assertEqual(data, "banana")

    @inlineCallbacks
    def test_node_exists_with_watch_nonexistant(self):
        """
        The node's existance can be checked with the exist_watch api
        a deferred will be returned and any node level events, created,
        deleted, modified invoke the callback. You can get these create
        event callbacks for non existant nodes.
        """
        node = ZNode("/zoo/elephant", self.client)
        exists, watch = yield node.exists_and_watch()
        self.assertFalse((yield exists))
        yield self.client.create("/zoo/elephant")
        event = yield watch
        self.assertEqual(event.type, zookeeper.CREATED_EVENT)
        self.assertEqual(event.path, node.path)

    @inlineCallbacks
    def test_node_get_data_with_watch_on_update(self):
        """
        Subscribing to a node will get node update events.
        """
        yield self.client.create("/zoo/elephant")
        node = ZNode("/zoo/elephant", self.client)
        data, watch = yield node.get_data_and_watch()
        yield self.client.set("/zoo/elephant")
        event = yield watch
        self.assertEqual(event.type, zookeeper.CHANGED_EVENT)
        self.assertEqual(event.path, "/zoo/elephant")

    @inlineCallbacks
    def test_node_get_data_with_watch_on_delete(self):
        """
        Subscribing to a node will get node deletion events.
        """
        yield self.client.create("/zoo/elephant")
        node = ZNode("/zoo/elephant", self.client)
        data, watch = yield node.get_data_and_watch()
        yield self.client.delete("/zoo/elephant")
        event = yield watch
        self.assertEqual(event.type, zookeeper.DELETED_EVENT)
        self.assertEqual(event.path, "/zoo/elephant")

    @inlineCallbacks
    def test_node_children(self):
        """
        A node's children can be introspected.
        """
        node = ZNode("/zoo", self.client)
        node_path_a = yield self.client.create("/zoo/lion")
        node_path_b = yield self.client.create("/zoo/tiger")
        children = yield node.get_children()
        children.sort()
        self.assertEqual(children[0].path, node_path_a)
        self.assertEqual(children[1].path, node_path_b)

    @inlineCallbacks
    def test_node_children_by_prefix(self):
        """
        A node's children can be introspected optionally with a prefix.
        """
        node = ZNode("/zoo", self.client)
        node_path_a = yield self.client.create("/zoo/lion")
        yield self.client.create("/zoo/tiger")
        children = yield node.get_children("lion")
        children.sort()
        self.assertEqual(children[0].path, node_path_a)
        self.assertEqual(len(children), 1)

    @inlineCallbacks
    def test_node_get_children_with_watch_create(self):
        """
        A node's children can explicitly be watched to given existance
        events for node creation and destruction.
        """
        node = ZNode("/zoo", self.client)
        children, watch = yield node.get_children_and_watch()
        yield self.client.create("/zoo/lion")
        event = yield watch
        self.assertEqual(event.path, "/zoo")
        self.assertEqual(event.type, zookeeper.CHILD_EVENT)
        self.assertEqual(event.type_name, "child")

    @inlineCallbacks
    def test_node_get_children_with_watch_delete(self):
        """
        A node's children can explicitly be watched to given existance
        events for node creation and destruction.
        """
        node = ZNode("/zoo", self.client)
        yield self.client.create("/zoo/lion")
        children, watch = yield node.get_children_and_watch()
        yield self.client.delete("/zoo/lion")
        event = yield watch
        self.assertEqual(event.path, "/zoo")
        self.assertEqual(event.type, zookeeper.CHILD_EVENT)

    @inlineCallbacks
    def test_bad_version_error(self):
        """
        The node captures the node version on any read operations, which
        it utilizes for write operations. On a concurrent modification error
        the node return a bad version error, this also clears the cached
        state so subsequent modifications will be against the latest version,
        unless the cache is seeded again by a read operation.
        """
        node = ZNode("/zoo/lion", self.client)
        self.client2 = ZookeeperClient("127.0.0.1:2181")
        yield self.client2.connect()
        yield self.client.create("/zoo/lion", "mouse")
        yield node.get_data()
        yield self.client2.set("/zoo/lion", "den2")
        data = yield self.client.exists("/zoo/lion")
        self.assertEqual(data['version'], 1)
        d = node.set_data("zebra")
        self.failUnlessFailure(d, zookeeper.BadVersionException)
        yield d
        # after failure the cache is deleted, and a set proceeds
        yield node.set_data("zebra")
        data = yield node.get_data()
        self.assertEqual(data, "zebra")
def get_zookeeper_client(self):
    """Return an unconnected client aimed at the test zookeeper server.

    Uses a deliberately short session timeout (1s) so session-expiry
    scenarios resolve quickly in tests.
    """
    return ZookeeperClient(
        get_test_zookeeper_address(), session_timeout=1000)
class CharmPublisherTest(RepositoryTestBase):
    # Integration tests for CharmPublisher: storing charm bundles in
    # provider storage and recording matching charm state in zookeeper.

    @inlineCallbacks
    def setUp(self):
        """Connect a client, create /charms, and build a publisher."""
        super(CharmPublisherTest, self).setUp()
        zookeeper.set_debug_level(0)

        self.charm = CharmDirectory(self.sample_dir1)
        self.charm_id = local_charm_id(self.charm)
        self.charm_key = under.quote(self.charm_id)
        # provider storage key
        self.charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, self.charm.get_sha256()))

        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.storage_dir = self.makeDir()
        self.storage = FileStorage(self.storage_dir)
        self.publisher = CharmPublisher(self.client, self.storage)

        yield self.client.connect()
        yield self.client.create("/charms")

    def tearDown(self):
        """Wipe the zookeeper tree and close the client."""
        deleteTree("/", self.client.handle)
        self.client.close()
        super(CharmPublisherTest, self).tearDown()

    @inlineCallbacks
    def test_add_charm_and_publish(self):
        """Publishing a charm records zk state, stores the bundle, and
        leaks no file handles."""
        open_file_count = _count_open_files()
        yield self.publisher.add_charm(self.charm_id, self.charm)
        result = yield self.publisher.publish()
        self.assertEquals(_count_open_files(), open_file_count)

        children = yield self.client.get_children("/charms")
        self.assertEqual(children, [self.charm_key])
        fh = yield self.storage.get(self.charm_storage_key)
        bundle = CharmBundle(fh)
        self.assertEqual(self.charm.get_sha256(), bundle.get_sha256())

        self.assertEqual(
            result[0].bundle_url,
            "file://%s/%s" % (self.storage_dir, self.charm_storage_key))

    @inlineCallbacks
    def test_published_charm_sans_unicode(self):
        """The serialized charm state contains no unicode markers."""
        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.publisher.publish()
        data, stat = yield self.client.get("/charms/%s" % self.charm_key)
        self.assertNotIn("unicode", data)

    @inlineCallbacks
    def test_add_charm_with_concurrent(self):
        """
        Publishing a charm, that has become published concurrent, after the
        add_charm, works fine. it will write to storage regardless. The use
        of a sha256 as part of the storage key is utilized to help ensure
        uniqueness of bits. The sha256 is also stored with the charm state.

        This relation between the charm state and the binary bits, helps
        guarantee the property that any published charm in zookeeper will
        use the binary bits that it was published with.
        """
        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(
            self.client, self.storage)
        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        yield self.publisher.publish()

        # modify the charm to create a conflict scenario
        self.makeFile("zebra",
                      path=os.path.join(self.sample_dir1, "junk.txt"))

        # assert the charm now has a different sha post modification
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(
            modified_charm_sha,
            self.charm.get_sha256())

        # verify publishing raises a stateerror
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised state error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # verify the zk state
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get(
            "/charms/%s" % charm_nodes[0])
        # assert the checksum matches the initially published checksum
        self.assertEqual(
            yaml.load(content)["sha256"], self.charm.get_sha256())
        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # and the binary bits where stored
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(
            self.storage_dir, modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))

    @inlineCallbacks
    def test_add_charm_with_concurrent_removal(self):
        """
        If a charm is published, and it detects that the charm exists
        already exists, it will attempt to retrieve the charm state to
        verify there is no checksum mismatch. If concurrently the charm
        is removed, the publisher should fail with a statechange error.
        """
        # Mock the state manager: first lookup passes through, the add
        # reports NodeExists, and the follow-up lookup reports NoNode —
        # simulating a concurrent removal between the two calls.
        manager = self.mocker.patch(CharmStateManager)
        manager.get_charm_state(self.charm_id)
        self.mocker.passthrough()

        def match_charm_bundle(bundle):
            return isinstance(bundle, CharmBundle)

        def match_charm_url(url):
            return url.startswith("file://")

        manager.add_charm_state(
            self.charm_id, MATCH(match_charm_bundle), MATCH(match_charm_url))
        self.mocker.result(fail(zookeeper.NodeExistsException()))

        manager.get_charm_state(self.charm_id)
        self.mocker.result(fail(zookeeper.NoNodeException()))
        self.mocker.replay()

        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.failUnlessFailure(self.publisher.publish(), StateChanged)

    @inlineCallbacks
    def test_add_charm_already_known(self):
        """Adding an existing charm, is an effective noop, as its not
        added to the internal publisher queue.
        """
        # Do an initial publishing of the charm
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)
class WatchDeliveryConnectionFailedTest(ZookeeperTestCase):
    """Watches are still sent on reconnect.
    """
    # Tests route one client through a TCP proxy so the connection can be
    # dropped or blackholed at will, while a direct client mutates state.

    def setUp(self):
        super(WatchDeliveryConnectionFailedTest, self).setUp()
        self.proxy = ProxyFactory("127.0.0.1", 2181)
        self.proxy_port = reactor.listenTCP(0, self.proxy)
        host = self.proxy_port.getHost()
        self.proxied_client = ZookeeperClient(
            "%s:%s" % (host.host, host.port))
        self.direct_client = ZookeeperClient("127.0.0.1:2181", 3000)
        self.session_events = []

        def session_event_collector(conn, event):
            self.session_events.append(event)

        self.proxied_client.set_session_callback(session_event_collector)
        return self.direct_client.connect()

    @inlineCallbacks
    def tearDown(self):
        zookeeper.set_debug_level(0)
        if self.proxied_client.connected:
            yield self.proxied_client.close()
        if not self.direct_client.connected:
            yield self.direct_client.connect()
        utils.deleteTree(handle=self.direct_client.handle)
        yield self.direct_client.close()
        self.proxy.lose_connection()
        yield self.proxy_port.stopListening()

    def verify_events(self, events, expected):
        """Verify the state of the session events encountered.
        """
        for value, state in zip([e.state_name for e in events], expected):
            self.assertEqual(value, state)

    @inlineCallbacks
    def test_child_watch_fires_upon_reconnect(self):
        """A child watch set before a disconnect fires after reconnect."""
        yield self.proxied_client.connect()

        # Setup tree
        cpath = "/test-tree"
        yield self.direct_client.create(cpath)

        # Setup watch
        child_d, watch_d = self.proxied_client.get_children_and_watch(cpath)
        self.assertEqual((yield child_d), [])

        # Kill the connection and fire the watch
        self.proxy.lose_connection()
        yield self.direct_client.create(
            cpath + "/abc", flags=zookeeper.SEQUENCE)

        # We should still get the child event.
        yield watch_d

        # We get two pairs of (connecting, connected) for the conn and watch
        self.assertEqual(len(self.session_events), 4)
        self.verify_events(
            self.session_events,
            ("connecting", "connecting", "connected", "connected"))

    @inlineCallbacks
    def test_exists_watch_fires_upon_reconnect(self):
        """An exists watch set before a disconnect fires after reconnect."""
        yield self.proxied_client.connect()
        cpath = "/test"

        # Setup watch
        exists_d, watch_d = self.proxied_client.exists_and_watch(cpath)
        self.assertEqual((yield exists_d), None)

        # Kill the connection and fire the watch
        self.proxy.lose_connection()
        yield self.direct_client.create(cpath)

        # We should still get the exists event.
        yield watch_d

        # We get two pairs of (connecting, connected) for the conn and watch
        self.assertEqual(len(self.session_events), 4)
        self.verify_events(
            self.session_events,
            ("connecting", "connecting", "connected", "connected"))

    @inlineCallbacks
    def test_get_watch_fires_upon_reconnect(self):
        """A data watch set before a disconnect fires after reconnect."""
        yield self.proxied_client.connect()

        # Setup tree
        cpath = "/test"
        yield self.direct_client.create(cpath, "abc")

        # Setup watch
        get_d, watch_d = self.proxied_client.get_and_watch(cpath)
        content, stat = yield get_d
        self.assertEqual(content, "abc")

        # Kill the connection and fire the watch
        self.proxy.lose_connection()
        yield self.direct_client.set(cpath, "xyz")

        # We should still get the exists event.
        yield watch_d

        # We also two pairs of (connecting, connected) for the conn and watch
        self.assertEqual(len(self.session_events), 4)
        self.verify_events(
            self.session_events,
            ("connecting", "connecting", "connected", "connected"))

    @inlineCallbacks
    def test_watch_delivery_failure_resends(self):
        """Simulate a network failure for the watch delivery

        The zk server effectively sends the watch delivery to the client,
        but the client never receives it.
        """
        yield self.proxied_client.connect()
        cpath = "/test"

        # Setup watch
        exists_d, watch_d = self.proxied_client.exists_and_watch(cpath)
        self.assertEqual((yield exists_d), None)

        # Pause the connection fire the watch, and blackhole the data.
        self.proxy.set_blocked(True)
        yield self.direct_client.create(cpath)
        self.proxy.set_blocked(False)
        self.proxy.lose_connection()

        # We should still get the exists event.
        yield watch_d

    @inlineCallbacks
    def xtest_binding_bug_session_exception(self):
        """This test triggers an exception in the python-zookeeper binding.

        File "txzookeeper/client.py", line 491, in create
           self.handle, path, data, acls, flags, callback)
        exceptions.SystemError: error return without exception set
        """
        # NOTE(review): disabled (x-prefixed); appears to depend on a ~15s
        # session-expiration wait against a single server.
        yield self.proxied_client.connect()
        data_d, watch_d = yield self.proxied_client.exists_and_watch("/")
        self.assertTrue((yield data_d))

        self.proxy.set_blocked(True)
        # Wait for session expiration, on a single server options are limited
        yield self.sleep(15)

        # Unblock the proxy for next connect, and then drop the connection.
        self.proxy.set_blocked(False)
        self.proxy.lose_connection()

        # Wait for a reconnect
        yield self.assertFailure(watch_d, zookeeper.SessionExpiredException)

        # Leads to bindings bug failure
        yield self.assertFailure(
            self.proxied_client.get("/a"),
            zookeeper.SessionExpiredException)

        self.assertEqual(self.session_events[-1].state_name, "expired")
class YAMLStateTest(TestCase):
    """Tests for the YAMLState dict-like zookeeper-backed state object."""

    @inlineCallbacks
    def setUp(self):
        zookeeper.set_debug_level(0)
        self.client = ZookeeperClient(get_test_zookeeper_address())
        yield self.client.connect()
        self.path = "/zoo"

    @inlineCallbacks
    def tearDown(self):
        exists = yield self.client.exists(self.path)
        if exists:
            yield remove_tree(self.client, self.path)

    @inlineCallbacks
    def test_get_empty(self):
        """Verify getting an empty node works as expected."""
        path = yield self.client.create(self.path)
        node = YAMLState(self.client, path)
        self.assertEqual(node, {})

    @inlineCallbacks
    def test_access_wo_create(self):
        """Verify accessing data for a non-existant node works as expected."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        self.assertEqual(node, {})

    def test_set_wo_read(self):
        """Verify that not calling read before mutation raises."""
        node = YAMLState(self.client, self.path)
        self.assertRaises(ValueError, node.__setitem__, "alpha", "beta")
        self.assertRaises(ValueError, node.update, {"alpha": "beta"})

    @inlineCallbacks
    def test_set_wo_write(self):
        """Check that get resolves from the internal write buffer.

        set/get pairs w/o write should present a view of the state
        reflecting local change. Verify that w/o write local data
        appears on subsequent calls but that zk state hasn't been
        changed.
        """
        path = yield self.client.create(self.path)
        node = YAMLState(self.client, path)
        yield node.read()
        options = dict(alpha="beta", one=1)
        node.update(options)
        self.assertEqual(node, options)

        zk_data, stat = yield self.client.get(self.path)
        # the node isn't created yet in zk
        self.assertEqual(zk_data, "")

    @inlineCallbacks
    def test_set_w_write(self):
        """Verify that write updates the local and zk state.

        When write is called we expect that zk state reflects this. We
        also expect calls to get to expect the reflected state.
        """
        node = YAMLState(self.client, self.path)
        yield node.read()
        options = dict(alpha="beta", one=1)
        node.update(options)
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem(key='alpha', new='beta'),
                 AddedItem(key='one', new=1)]))

        # a local get should reflect proper data
        self.assertEqual(node, options)

        # and a direct look at zk should work as well
        zk_data, stat = yield self.client.get(self.path)
        zk_data = yaml.load(zk_data)
        self.assertEqual(zk_data, options)

    @inlineCallbacks
    def test_conflict_on_set(self):
        """Version conflict error tests.

        Test that two YAMLState objects writing to the same path can
        and will throw version errors when elements become out of
        read.
        """
        node = YAMLState(self.client, self.path)
        node2 = YAMLState(self.client, self.path)

        yield node.read()
        yield node2.read()

        options = dict(alpha="beta", one=1)
        node.update(options)
        yield node.write()

        node2.update(options)
        changes = yield node2.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("one", 1)]))

        # first read node2
        self.assertEqual(node, options)
        # write on node 1
        options2 = dict(alpha="gamma", one="two")
        node.update(options2)
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([ModifiedItem("alpha", "beta", "gamma"),
                 ModifiedItem("one", 1, "two")]))

        # verify that node 1 reports as expected
        self.assertEqual(node, options2)

        # verify that node2 has the older data still
        self.assertEqual(node2, options)

        # now issue a set/write from node2
        # this will merge the data deleting 'one'
        # and updating other values
        options3 = dict(alpha="cappa", new="next")
        node2.update(options3)
        del node2["one"]

        expected = dict(alpha="cappa", new="next")
        changes = yield node2.write()
        self.assertEqual(
            set(changes),
            set([DeletedItem("one", 1),
                 ModifiedItem("alpha", "beta", "cappa"),
                 AddedItem("new", "next")]))
        self.assertEqual(expected, node2)

        # but node still reflects the old data
        self.assertEqual(node, options2)

    @inlineCallbacks
    def test_setitem(self):
        """Item assignment buffers locally and persists on write."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        options = dict(alpha="beta", one=1)
        node["alpha"] = "beta"
        node["one"] = 1
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("one", 1)]))

        # a local get should reflect proper data
        self.assertEqual(node, options)

        # and a direct look at zk should work as well
        zk_data, stat = yield self.client.get(self.path)
        zk_data = yaml.load(zk_data)
        self.assertEqual(zk_data, options)

    @inlineCallbacks
    def test_multiple_reads(self):
        """Calling read resets state to ZK after multiple round-trips."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        node.update({"alpha": "beta", "foo": "bar"})
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "bar")
        yield node.read()
        # A read resets the data to the empty state
        self.assertEqual(node, {})
        node.update({"alpha": "beta", "foo": "bar"})
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("foo", "bar")]))
        # A write retains the newly set values
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "bar")

        # now get another state instance and change zk state
        node2 = YAMLState(self.client, self.path)
        yield node2.read()
        node2.update({"foo": "different"})
        changes = yield node2.write()
        self.assertEqual(
            changes,
            [ModifiedItem("foo", "bar", "different")])

        # This should pull in the new state (and still have the merged old.
        yield node.read()
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "different")

    # BUGFIX: @inlineCallbacks was missing; the test body uses yield, so
    # trial received a bare generator and the assertions never executed
    # (the test silently passed without running).
    @inlineCallbacks
    def test_dictmixin_usage(self):
        """Verify that the majority of dict operation function."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        node.update({"alpha": "beta", "foo": "bar"})
        self.assertEqual(node, {"alpha": "beta", "foo": "bar"})
        result = node.pop("foo")
        self.assertEqual(result, "bar")
        self.assertEqual(node, {"alpha": "beta"})
        node["delta"] = "gamma"
        self.assertEqual(set(node.keys()), set(("alpha", "delta")))
        result = list(node.iteritems())
        self.assertIn(("alpha", "beta"), result)
        self.assertIn(("delta", "gamma"), result)

    @inlineCallbacks
    def test_del_empties_state(self):
        """Deleting the last key leaves an empty, writable state."""
        d = YAMLState(self.client, self.path)
        yield d.read()
        d["a"] = "foo"
        changes = yield d.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])
        del d["a"]
        changes = yield d.write()
        self.assertEqual(changes, [DeletedItem("a", "foo")])
        self.assertEqual(d, {})

    @inlineCallbacks
    def test_read_resync(self):
        """A read picks up values written by another state instance."""
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1["a"] = "foo"
        changes = yield d1.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])
        d2 = YAMLState(self.client, self.path)
        yield d2.read()
        del d2["a"]
        changes = yield d2.write()
        self.assertEqual(changes, [DeletedItem("a", "foo")])
        d2["a"] = "bar"
        changes = yield d2.write()
        self.assertEqual(changes, [AddedItem("a", "bar")])
        zk_data, stat = yield self.client.get(self.path)
        yield d1.read()
        # d1 should pick up the new value (from d2) on a read
        zk_data, stat = yield self.client.get(self.path)
        self.assertEqual(d1["a"], "bar")

    @inlineCallbacks
    def test_multiple_writes(self):
        """Sequential writes emit only the incremental change sets."""
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1.update(dict(foo="bar", this="that"))
        changes = yield d1.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("foo", "bar"), AddedItem("this", "that")]))
        del d1["this"]
        d1["another"] = "value"
        changes = yield d1.write()
        self.assertEqual(
            set(changes),
            set([DeletedItem("this", "that"),
                 AddedItem("another", "value")]))
        expected = {"foo": "bar", "another": "value"}
        self.assertEqual(d1, expected)
        changes = yield d1.write()
        self.assertEqual(changes, [])
        self.assertEqual(d1, expected)
        yield d1.read()
        self.assertEqual(d1, expected)
        # This shouldn't write any changes
        changes = yield d1.write()
        self.assertEqual(changes, [])
        self.assertEqual(d1, expected)

    @inlineCallbacks
    def test_write_twice(self):
        """A second write with no local changes is a no-op."""
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1["a"] = "foo"
        changes = yield d1.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])

        d2 = YAMLState(self.client, self.path)
        yield d2.read()
        d2["a"] = "bar"
        changes = yield d2.write()
        self.assertEqual(changes, [ModifiedItem("a", "foo", "bar")])

        # Shouldn't write again. Changes were already
        # flushed and acted upon by other parties.
        changes = yield d1.write()
        self.assertEqual(changes, [])

        yield d1.read()
        self.assertEquals(d1, d2)

    @inlineCallbacks
    def test_read_requires_node(self):
        """Validate that read raises when required=True."""
        d1 = YAMLState(self.client, self.path)
        yield self.assertFailure(d1.read(True), StateNotFound)
def bootstrap(self): """Bootstrap a local development environment. """ # Check for existing environment state = yield self.load_state() if state is not False: raise ProviderError("Environment already bootstrapped") # Check for required packages log.info("Checking for required packages...") missing = check_packages(*REQUIRED_PACKAGES) if missing: raise ProviderError("Missing packages %s" % ( ", ".join(sorted(list(missing))))) # Get/create directory for zookeeper and files zookeeper_dir = os.path.join(self._directory, "zookeeper") if not os.path.exists(zookeeper_dir): os.makedirs(zookeeper_dir) # Start networking, and get an open port. log.info("Starting networking...") net = Network("default", subnet=122) # Start is a noop if its already started, which it is by default, # per libvirt-bin package installation yield net.start() net_attributes = yield net.get_attributes() port = get_open_port(net_attributes["ip"]["address"]) # Start zookeeper log.info("Starting zookeeper...") # Run zookeeper as the current user, unless we're being run as root # in which case run zookeeper as the 'zookeeper' user. zookeeper_user = None if os.geteuid() == 0: zookeeper_user = "******" zookeeper = Zookeeper(zookeeper_dir, port=port, host=net_attributes["ip"]["address"], user=zookeeper_user, group=zookeeper_user) yield zookeeper.start() # Starting provider storage server log.info("Starting storage server...") storage_server = StorageServer( pid_file=os.path.join(self._directory, "storage-server.pid"), storage_dir=os.path.join(self._directory, "files"), host=net_attributes["ip"]["address"], port=get_open_port(net_attributes["ip"]["address"]), log_file=os.path.join(self._directory, "storage-server.log")) yield storage_server.start() # Save the zookeeper start to provider storage. 
yield self.save_state({"zookeeper-instances": ["local"], "zookeeper-address": zookeeper.address}) # Initialize the zookeeper state log.debug("Initializing state...") admin_identity = make_identity( "admin:%s" % self.config["admin-secret"]) client = ZookeeperClient(zookeeper.address) yield client.connect() hierarchy = StateHierarchy(client, admin_identity, "local", "local") yield hierarchy.initialize() # Store user credentials from the running user try: public_key = get_user_authorized_keys(self.config) public_key = public_key.strip() except LookupError, e: raise ProviderError(str(e))
class ClientSessionTests(ZookeeperTestCase):
    """Tests of client behavior across a multi-node zookeeper cluster:
    server migration, watch survival, error callbacks, and session
    expiration semantics.
    """

    def setUp(self):
        super(ClientSessionTests, self).setUp()
        self.cluster.start()
        self.client = None
        self.client2 = None
        # Deterministic connection order makes the "which server am I
        # connected to" assumptions in these tests reliable.
        zookeeper.deterministic_conn_order(True)
        zookeeper.set_debug_level(0)

    @property
    def cluster(self):
        return CLUSTER

    def tearDown(self):
        super(ClientSessionTests, self).tearDown()
        if self.client:
            self.client.close()
        self.cluster.reset()

    @inlineCallbacks
    def test_client_session_migration(self):
        """A client will automatically rotate servers to ensure a connection.

        A client connected to multiple servers, will transparently
        migrate amongst them, as individual servers can no longer be
        reached. A client's session will be maintained.
        """
        # Connect to the Zookeeper Cluster
        servers = ",".join([s.address for s in self.cluster])
        self.client = ZookeeperClient(servers)

        yield self.client.connect()
        yield self.client.create("/hello", flags=zookeeper.EPHEMERAL)

        # Shutdown the server the client is connected to
        self.cluster[0].stop()

        # Wait for the shutdown and cycle, if we don't wait we'll
        # get a zookeeper connectionloss exception on occasion.
        yield self.sleep(0.1)

        self.assertTrue(self.client.connected)

        exists = yield self.client.exists("/hello")
        self.assertTrue(exists)

    @inlineCallbacks
    def test_client_watch_migration(self):
        """On server rotation, extant watches are still active.

        A client connected to multiple servers, will transparently
        migrate amongst them, as individual servers can no longer be
        reached. Watch deferreds issued from the same client instance
        will continue to function as the session is maintained.
        """
        session_events = []

        def session_event_callback(connection, e):
            session_events.append(e)

        # Connect to the Zookeeper Cluster
        servers = ",".join([s.address for s in self.cluster])
        self.client = ZookeeperClient(servers)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        # Setup a watch
        yield self.client.create("/hello")
        exists_d, watch_d = self.client.exists_and_watch("/hello")
        yield exists_d

        # Shutdown the server the client is connected to
        self.cluster[0].stop()

        # Wait for the shutdown and cycle, if we don't wait we'll
        # occasionally get a zookeeper connectionloss exception.
        yield self.sleep(0.1)

        # The session events that would have been ignored are sent
        # to the session event callback.
        self.assertTrue(session_events)
        self.assertTrue(self.client.connected)

        # If we delete the node, we'll see the watch fire.
        yield self.client.delete("/hello")
        event = yield watch_d
        self.assertEqual(event.type_name, "deleted")
        self.assertEqual(event.path, "/hello")

    @inlineCallbacks
    def test_connection_error_handler(self):
        """A callback can be specified for connection errors.

        We can specify a callback for connection errors, that can
        perform recovery for a disconnected client, re-establishing
        the connection.
        """
        @inlineCallbacks
        def connection_error_handler(connection, error):
            # Moved management of this connection attribute out of the
            # default behavior for a connection exception, to support
            # the retry facade. Under the hood libzk is going to be
            # trying to transparently reconnect
            connection.connected = False

            # On loss of the connection, reconnect the client w/ same session.
            yield connection.connect(
                self.cluster[1].address, client_id=connection.client_id)
            returnValue(23)

        self.client = ZookeeperClient(self.cluster[0].address)
        self.client.set_connection_error_callback(connection_error_handler)
        yield self.client.connect()

        yield self.client.create("/hello")
        exists_d, watch_d = self.client.exists_and_watch("/hello")
        yield exists_d

        # Shutdown the server the client is connected to
        self.cluster[0].stop()
        yield self.sleep(0.1)

        # Results in connection loss exception, and invoking of error handler.
        result = yield self.client.exists("/hello")

        # The result of the error handler is returned to the api
        self.assertEqual(result, 23)

        exists = yield self.client.exists("/hello")
        self.assertTrue(exists)

    @inlineCallbacks
    def test_client_session_expiration_event(self):
        """A client which receives a session expiration event.
        """
        session_events = []
        events_received = Deferred()

        def session_event_callback(connection, e):
            session_events.append(e)
            if len(session_events) == 8:
                events_received.callback(True)

        # Connect to a node in the cluster and establish a watch
        self.client = ZookeeperClient(self.cluster[0].address)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        # Setup some watches to verify they are cleaned out on expiration.
        d, e_watch_d = self.client.exists_and_watch("/")
        yield d

        d, g_watch_d = self.client.get_and_watch("/")
        yield d

        d, c_watch_d = self.client.get_children_and_watch("/")
        yield d

        # Connect a client to the same session on a different node.
        self.client2 = ZookeeperClient(self.cluster[1].address)
        yield self.client2.connect(client_id=self.client.client_id)

        # Close the new client and wait for the event propagation
        yield self.client2.close()

        # It can take some time for this to propagate
        yield events_received
        self.assertEqual(len(session_events), 8)

        # The last four (conn + 3 watches) are all expired
        for evt in session_events[4:]:
            self.assertEqual(evt.state_name, "expired")

        # The connection is dead without reconnecting.
        yield self.assertFailure(
            self.client.exists("/"),
            NotConnectedException, ConnectionException)

        self.assertTrue(self.client.unrecoverable)
        yield self.assertFailure(e_watch_d, zookeeper.SessionExpiredException)
        yield self.assertFailure(g_watch_d, zookeeper.SessionExpiredException)
        yield self.assertFailure(c_watch_d, zookeeper.SessionExpiredException)

        # If a reconnect attempt is made with a dead session id.
        # Fixed: removed leftover debug print and free the old handle
        # before reconnecting (matching the sibling version of this test).
        client_id = self.client.client_id
        self.client.close()  # Free the handle
        yield self.client.connect(client_id=client_id)
        yield self.assertFailure(
            self.client.get_children("/"),
            NotConnectedException, ConnectionException)

    test_client_session_expiration_event.timeout = 10

    @inlineCallbacks
    def test_client_reconnect_session_on_different_server(self):
        """On connection failure, An application can choose to use a
        new connection with which to reconnect to a different member
        of the zookeeper cluster, reacquiring the extant session.

        A large obvious caveat to using a new client instance rather
        than reconnecting the existing client, is that even though the
        session has outstanding watches, the watch callbacks/deferreds
        won't be active unless the client instance used to create them
        is connected.
        """
        session_events = []

        def session_event_callback(connection, e):
            session_events.append(e)

        # Connect to a node in the cluster and establish a watch
        self.client = ZookeeperClient(self.cluster[2].address)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        yield self.client.create("/hello", flags=zookeeper.EPHEMERAL)
        self.assertTrue((yield self.client.exists("/hello")))

        # Shutdown the server the client is connected to
        self.cluster[2].stop()
        yield self.sleep(0.1)

        # Verify we got a session event regarding the down server
        self.assertTrue(session_events)

        # Open up a new connection to a different server with same session
        self.client2 = ZookeeperClient(self.cluster[0].address)
        yield self.client2.connect(client_id=self.client.client_id)

        # Close the old disconnected client
        self.client.close()

        # Verify the ephemeral still exists
        exists = yield self.client2.exists("/hello")
        self.assertTrue(exists)

        # Destroy the session and reconnect
        self.client2.close()
        yield self.client.connect(self.cluster[0].address)

        # Ephemeral is destroyed when the session closed.
        exists = yield self.client.exists("/hello")
        self.assertFalse(exists)
class YAMLStateTest(TestCase):
    """Tests for YAMLState, a dict-like view of YAML data stored in a
    zookeeper node, with read/write round-trip and merge semantics.
    """

    @inlineCallbacks
    def setUp(self):
        zookeeper.set_debug_level(0)
        self.client = ZookeeperClient(get_test_zookeeper_address())
        yield self.client.connect()
        self.path = "/zoo"

    @inlineCallbacks
    def tearDown(self):
        exists = yield self.client.exists(self.path)
        if exists:
            yield remove_tree(self.client, self.path)

    @inlineCallbacks
    def test_get_empty(self):
        """Verify getting an empty node works as expected."""
        path = yield self.client.create(self.path)
        node = YAMLState(self.client, path)
        self.assertEqual(node, {})

    @inlineCallbacks
    def test_access_wo_create(self):
        """Verify accessing data for a non-existent node works as expected."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        self.assertEqual(node, {})

    def test_set_wo_read(self):
        """Verify that not calling read before mutation raises."""
        node = YAMLState(self.client, self.path)
        self.assertRaises(ValueError, node.__setitem__, "alpha", "beta")
        self.assertRaises(ValueError, node.update, {"alpha": "beta"})

    @inlineCallbacks
    def test_set_wo_write(self):
        """Check that get resolves from the internal write buffer.

        set/get pairs w/o write should present a view of the state
        reflecting local change. Verify that w/o write local data
        appears on subsequent calls but that zk state hasn't been
        changed.
        """
        path = yield self.client.create(self.path)
        node = YAMLState(self.client, path)
        yield node.read()
        options = dict(alpha="beta", one=1)
        node.update(options)
        self.assertEqual(node, options)
        zk_data, stat = yield self.client.get(self.path)
        # the node isn't created yet in zk
        self.assertEqual(zk_data, "")

    @inlineCallbacks
    def test_set_w_write(self):
        """Verify that write updates the local and zk state.

        When write is called we expect that zk state reflects this. We
        also expect calls to get to expect the reflected state.
        """
        node = YAMLState(self.client, self.path)
        yield node.read()
        options = dict(alpha="beta", one=1)
        node.update(options)
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([
                AddedItem(key='alpha', new='beta'),
                AddedItem(key='one', new=1)
            ]))
        # a local get should reflect proper data
        self.assertEqual(node, options)
        # and a direct look at zk should work as well
        zk_data, stat = yield self.client.get(self.path)
        zk_data = yaml.load(zk_data)
        self.assertEqual(zk_data, options)

    @inlineCallbacks
    def test_conflict_on_set(self):
        """Version conflict error tests.

        Test that two YAMLState objects writing to the same path can
        and will throw version errors when elements become out of
        read.
        """
        node = YAMLState(self.client, self.path)
        node2 = YAMLState(self.client, self.path)

        yield node.read()
        yield node2.read()

        options = dict(alpha="beta", one=1)
        node.update(options)
        yield node.write()

        node2.update(options)
        changes = yield node2.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("one", 1)]))

        # first read node2
        self.assertEqual(node, options)
        # write on node 1
        options2 = dict(alpha="gamma", one="two")
        node.update(options2)
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([
                ModifiedItem("alpha", "beta", "gamma"),
                ModifiedItem("one", 1, "two")
            ]))

        # verify that node 1 reports as expected
        self.assertEqual(node, options2)
        # verify that node2 has the older data still
        self.assertEqual(node2, options)

        # now issue a set/write from node2
        # this will merge the data deleting 'one'
        # and updating other values
        options3 = dict(alpha="cappa", new="next")
        node2.update(options3)
        del node2["one"]

        expected = dict(alpha="cappa", new="next")
        changes = yield node2.write()
        self.assertEqual(
            set(changes),
            set([
                DeletedItem("one", 1),
                ModifiedItem("alpha", "beta", "cappa"),
                AddedItem("new", "next")
            ]))
        self.assertEqual(expected, node2)

        # but node still reflects the old data
        self.assertEqual(node, options2)

    @inlineCallbacks
    def test_setitem(self):
        node = YAMLState(self.client, self.path)
        yield node.read()
        options = dict(alpha="beta", one=1)
        node["alpha"] = "beta"
        node["one"] = 1
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("one", 1)]))

        # a local get should reflect proper data
        self.assertEqual(node, options)
        # and a direct look at zk should work as well
        zk_data, stat = yield self.client.get(self.path)
        zk_data = yaml.load(zk_data)
        self.assertEqual(zk_data, options)

    @inlineCallbacks
    def test_multiple_reads(self):
        """Calling read resets state to ZK after multiple round-trips."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        node.update({"alpha": "beta", "foo": "bar"})
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "bar")
        yield node.read()
        # A read resets the data to the empty state
        self.assertEqual(node, {})
        node.update({"alpha": "beta", "foo": "bar"})
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("foo", "bar")]))
        # A write retains the newly set values
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "bar")

        # now get another state instance and change zk state
        node2 = YAMLState(self.client, self.path)
        yield node2.read()
        node2.update({"foo": "different"})
        changes = yield node2.write()
        self.assertEqual(changes, [ModifiedItem("foo", "bar", "different")])

        # This should pull in the new state (and still have the merged old.
        yield node.read()
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "different")

    # Fixed: this test uses yield but was missing @inlineCallbacks, so
    # the generator body (and all of its assertions) never executed.
    @inlineCallbacks
    def test_dictmixin_usage(self):
        """Verify that the majority of dict operation function."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        node.update({"alpha": "beta", "foo": "bar"})
        self.assertEqual(node, {"alpha": "beta", "foo": "bar"})
        result = node.pop("foo")
        self.assertEqual(result, "bar")
        self.assertEqual(node, {"alpha": "beta"})
        node["delta"] = "gamma"
        self.assertEqual(set(node.keys()), set(("alpha", "delta")))
        result = list(node.iteritems())
        self.assertIn(("alpha", "beta"), result)
        self.assertIn(("delta", "gamma"), result)

    @inlineCallbacks
    def test_del_empties_state(self):
        d = YAMLState(self.client, self.path)
        yield d.read()
        d["a"] = "foo"
        changes = yield d.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])
        del d["a"]
        changes = yield d.write()
        self.assertEqual(changes, [DeletedItem("a", "foo")])
        self.assertEqual(d, {})

    @inlineCallbacks
    def test_read_resync(self):
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1["a"] = "foo"
        changes = yield d1.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])
        d2 = YAMLState(self.client, self.path)
        yield d2.read()
        del d2["a"]
        changes = yield d2.write()
        self.assertEqual(changes, [DeletedItem("a", "foo")])
        d2["a"] = "bar"
        changes = yield d2.write()
        self.assertEqual(changes, [AddedItem("a", "bar")])
        zk_data, stat = yield self.client.get(self.path)
        yield d1.read()
        # d1 should pick up the new value (from d2) on a read
        zk_data, stat = yield self.client.get(self.path)
        self.assertEqual(d1["a"], "bar")

    @inlineCallbacks
    def test_multiple_writes(self):
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1.update(dict(foo="bar", this="that"))
        changes = yield d1.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("foo", "bar"), AddedItem("this", "that")]))
        del d1["this"]
        d1["another"] = "value"
        changes = yield d1.write()
        self.assertEqual(
            set(changes),
            set([DeletedItem("this", "that"),
                 AddedItem("another", "value")]))
        expected = {"foo": "bar", "another": "value"}
        self.assertEqual(d1, expected)
        changes = yield d1.write()
        self.assertEqual(changes, [])
        self.assertEqual(d1, expected)
        yield d1.read()
        self.assertEqual(d1, expected)
        # This shouldn't write any changes
        changes = yield d1.write()
        self.assertEqual(changes, [])
        self.assertEqual(d1, expected)

    @inlineCallbacks
    def test_write_twice(self):
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1["a"] = "foo"
        changes = yield d1.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])

        d2 = YAMLState(self.client, self.path)
        yield d2.read()
        d2["a"] = "bar"
        changes = yield d2.write()
        self.assertEqual(changes, [ModifiedItem("a", "foo", "bar")])

        # Shouldn't write again. Changes were already
        # flushed and acted upon by other parties.
        changes = yield d1.write()
        self.assertEqual(changes, [])

        yield d1.read()
        self.assertEquals(d1, d2)

    @inlineCallbacks
    def test_read_requires_node(self):
        """Validate that read raises when required=True."""
        d1 = YAMLState(self.client, self.path)
        yield self.assertFailure(d1.read(True), StateNotFound)