class CharmPublisherTest(RepositoryTestBase):

    @inlineCallbacks
    def setUp(self):
        super(CharmPublisherTest, self).setUp()
        zookeeper.set_debug_level(0)

        self.charm = CharmDirectory(self.sample_dir1)
        self.charm_id = local_charm_id(self.charm)
        self.charm_key = under.quote(self.charm_id)
        # provider storage key
        self.charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, self.charm.get_sha256()))

        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.storage_dir = self.makeDir()
        self.storage = FileStorage(self.storage_dir)
        self.publisher = CharmPublisher(self.client, self.storage)

        yield self.client.connect()
        yield self.client.create("/charms")

    def tearDown(self):
        deleteTree("/", self.client.handle)
        self.client.close()
        super(CharmPublisherTest, self).tearDown()

    @inlineCallbacks
    def test_add_charm_and_publish(self):
        open_file_count = _count_open_files()
        yield self.publisher.add_charm(self.charm_id, self.charm)
        result = yield self.publisher.publish()
        self.assertEquals(_count_open_files(), open_file_count)

        children = yield self.client.get_children("/charms")
        self.assertEqual(children, [self.charm_key])
        fh = yield self.storage.get(self.charm_storage_key)
        bundle = CharmBundle(fh)
        self.assertEqual(self.charm.get_sha256(), bundle.get_sha256())

        self.assertEqual(
            result[0].bundle_url,
            "file://%s/%s" % (self.storage_dir, self.charm_storage_key))

    @inlineCallbacks
    def test_published_charm_sans_unicode(self):
        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.publisher.publish()
        data, stat = yield self.client.get("/charms/%s" % self.charm_key)
        self.assertNotIn("unicode", data)

    @inlineCallbacks
    def test_add_charm_with_concurrent(self):
        """Publishing a charm that was concurrently published after the
        add_charm works fine; it will write to storage regardless.

        Using the sha256 as part of the storage key helps ensure the
        uniqueness of the stored bits. The sha256 is also stored with the
        charm state. This relation between the charm state and the binary
        bits guarantees that any published charm in zookeeper uses the
        binary bits it was published with.
        """
        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(self.client, self.storage)
        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        yield self.publisher.publish()

        # Modify the charm to create a conflict scenario.
        self.makeFile(
            "zebra", path=os.path.join(self.sample_dir1, "junk.txt"))

        # Assert the charm now has a different sha post modification.
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(modified_charm_sha, self.charm.get_sha256())

        # Verify publishing raises a StateChanged error.
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised a StateChanged error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # Verify the zk state.
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get("/charms/%s" % charm_nodes[0])

        # Assert the checksum matches the initially published checksum.
        self.assertEqual(
            yaml.load(content)["sha256"], self.charm.get_sha256())
        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # ... and the modified binary bits were stored as well.
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(
            self.storage_dir, modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))

    @inlineCallbacks
    def test_add_charm_with_concurrent_removal(self):
        """If a charm is published and the publisher detects that the charm
        already exists, it will attempt to retrieve the charm state to
        verify there is no checksum mismatch. If the charm is concurrently
        removed, the publisher should fail with a StateChanged error.
        """
        manager = self.mocker.patch(CharmStateManager)
        manager.get_charm_state(self.charm_id)
        self.mocker.passthrough()

        def match_charm_bundle(bundle):
            return isinstance(bundle, CharmBundle)

        def match_charm_url(url):
            return url.startswith("file://")

        manager.add_charm_state(
            self.charm_id, MATCH(match_charm_bundle), MATCH(match_charm_url))
        self.mocker.result(fail(zookeeper.NodeExistsException()))

        manager.get_charm_state(self.charm_id)
        self.mocker.result(fail(zookeeper.NoNodeException()))
        self.mocker.replay()

        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.failUnlessFailure(self.publisher.publish(), StateChanged)

    @inlineCallbacks
    def test_add_charm_already_known(self):
        """Adding an existing charm is effectively a no-op, as it is not
        added to the internal publisher queue.
        """
        # Do an initial publishing of the charm.
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)
class ClientSessionTests(ZookeeperTestCase):

    def setUp(self):
        super(ClientSessionTests, self).setUp()
        self.cluster.start()
        self.client = None
        self.client2 = None
        zookeeper.deterministic_conn_order(True)
        zookeeper.set_debug_level(0)

    @property
    def cluster(self):
        return CLUSTER

    def tearDown(self):
        super(ClientSessionTests, self).tearDown()
        if self.client:
            self.client.close()
        if self.client2:
            self.client2.close()
        self.cluster.reset()

    @inlineCallbacks
    def test_client_session_migration(self):
        """A client will automatically rotate servers to ensure a connection.

        A client connected to multiple servers will transparently migrate
        amongst them as individual servers can no longer be reached. The
        client's session will be maintained.
        """
        # Connect to the zookeeper cluster.
        servers = ",".join([s.address for s in self.cluster])
        self.client = ZookeeperClient(servers)
        yield self.client.connect()
        yield self.client.create("/hello", flags=zookeeper.EPHEMERAL)

        # Shut down the server the client is connected to.
        self.cluster[0].stop()

        # Wait for the shutdown and cycle; if we don't wait we'll
        # occasionally get a zookeeper connection-loss exception.
        yield self.sleep(0.1)

        self.assertTrue(self.client.connected)
        exists = yield self.client.exists("/hello")
        self.assertTrue(exists)

    @inlineCallbacks
    def test_client_watch_migration(self):
        """On server rotation, extant watches are still active.

        A client connected to multiple servers will transparently migrate
        amongst them as individual servers can no longer be reached. Watch
        deferreds issued from the same client instance will continue to
        function as the session is maintained.
        """
        session_events = []

        def session_event_callback(connection, e):
            session_events.append(e)

        # Connect to the zookeeper cluster.
        servers = ",".join([s.address for s in self.cluster])
        self.client = ZookeeperClient(servers)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        # Set up a watch.
        yield self.client.create("/hello")
        exists_d, watch_d = self.client.exists_and_watch("/hello")
        yield exists_d

        # Shut down the server the client is connected to.
        self.cluster[0].stop()

        # Wait for the shutdown and cycle; if we don't wait we'll
        # occasionally get a zookeeper connection-loss exception.
        yield self.sleep(0.1)

        # The session events that would have been ignored are sent
        # to the session event callback.
        self.assertTrue(session_events)
        self.assertTrue(self.client.connected)

        # If we delete the node, we'll see the watch fire.
        yield self.client.delete("/hello")
        event = yield watch_d
        self.assertEqual(event.type_name, "deleted")
        self.assertEqual(event.path, "/hello")

    @inlineCallbacks
    def test_connection_error_handler(self):
        """A callback can be specified for connection errors.

        The callback can perform recovery for a disconnected client.
        """

        @inlineCallbacks
        def connection_error_handler(connection, error):
            # Managing this connection attribute was moved out of the
            # default behavior for a connection exception, to support
            # the retry facade. Under the hood libzk is going to be
            # trying to transparently reconnect.
            connection.connected = False

            # On loss of the connection, reconnect the client with the
            # same session.
            connection.close()
            yield connection.connect(
                self.cluster[1].address, client_id=connection.client_id)
            returnValue(23)

        self.client = ZookeeperClient(self.cluster[0].address)
        self.client.set_connection_error_callback(connection_error_handler)
        yield self.client.connect()

        yield self.client.create("/hello")
        exists_d, watch_d = self.client.exists_and_watch("/hello")
        yield exists_d

        # Shut down the server the client is connected to.
        self.cluster[0].stop()
        yield self.sleep(0.1)

        # Results in a connection-loss exception and invocation of the
        # error handler.
        result = yield self.client.exists("/hello")

        # The result of the error handler is returned to the api.
        self.assertEqual(result, 23)

        exists = yield self.client.exists("/hello")
        self.assertTrue(exists)

    @inlineCallbacks
    def test_client_session_expiration_event(self):
        """A client can receive a session expiration event."""
        session_events = []
        events_received = Deferred()

        def session_event_callback(connection, e):
            session_events.append(e)
            if len(session_events) == 8:
                events_received.callback(True)

        # Connect to a node in the cluster and establish a watch.
        self.client = ZookeeperClient(self.cluster[0].address)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        # Set up some watches to verify they are cleaned out on expiration.
        d, e_watch_d = self.client.exists_and_watch("/")
        yield d

        d, g_watch_d = self.client.get_and_watch("/")
        yield d

        d, c_watch_d = self.client.get_children_and_watch("/")
        yield d

        # Connect a client to the same session on a different node.
        self.client2 = ZookeeperClient(self.cluster[1].address)
        yield self.client2.connect(client_id=self.client.client_id)

        # Close the new client and wait for the event propagation.
        yield self.client2.close()

        # It can take some time for this to propagate.
        yield events_received
        self.assertEqual(len(session_events), 8)

        # The last four (conn + 3 watches) are all expired.
        for evt in session_events[4:]:
            self.assertEqual(evt.state_name, "expired")

        # The connection is dead without reconnecting.
        yield self.assertFailure(
            self.client.exists("/"),
            NotConnectedException, ConnectionException)

        self.assertTrue(self.client.unrecoverable)
        yield self.assertFailure(e_watch_d, zookeeper.SessionExpiredException)
        yield self.assertFailure(g_watch_d, zookeeper.SessionExpiredException)
        yield self.assertFailure(c_watch_d, zookeeper.SessionExpiredException)

        # A reconnect attempt with the dead session id also fails.
        client_id = self.client.client_id
        self.client.close()  # Free the handle
        yield self.client.connect(client_id=client_id)
        yield self.assertFailure(
            self.client.get_children("/"),
            NotConnectedException, ConnectionException)

    test_client_session_expiration_event.timeout = 10

    @inlineCallbacks
    def test_client_reconnect_session_on_different_server(self):
        """On connection failure, an application can choose to use a new
        connection to reconnect to a different member of the zookeeper
        cluster, reacquiring the extant session.

        A large, obvious caveat to using a new client instance rather than
        reconnecting the existing client is that even though the session
        has outstanding watches, the watch callbacks/deferreds won't be
        active unless the client instance used to create them is connected.
        """
        session_events = []

        def session_event_callback(connection, e):
            session_events.append(e)

        # Connect to a node in the cluster and establish a watch.
        self.client = ZookeeperClient(
            self.cluster[2].address, session_timeout=5000)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        yield self.client.create("/hello", flags=zookeeper.EPHEMERAL)
        self.assertTrue((yield self.client.exists("/hello")))

        # Shut down the server the client is connected to.
        self.cluster[2].stop()
        yield self.sleep(0.1)

        # Verify we got a session event regarding the down server.
        self.assertTrue(session_events)

        # Open a new connection to a different server with the same session.
        self.client2 = ZookeeperClient(self.cluster[0].address)
        yield self.client2.connect(client_id=self.client.client_id)

        # Close the old disconnected client.
        self.client.close()

        # Verify the ephemeral still exists.
        exists = yield self.client2.exists("/hello")
        self.assertTrue(exists)

        # Destroy the session and reconnect.
        self.client2.close()
        yield self.client.connect(self.cluster[0].address)

        # The ephemeral is destroyed when the session is closed.
        exists = yield self.client.exists("/hello")
        self.assertFalse(exists)

    @inlineCallbacks
    def test_managed_client_backoff(self):
        output = self.capture_log(level=logging.DEBUG)
        self.patch(managed, 'BACKOFF_INCREMENT', 2)
        self.client = yield managed.ManagedClient(
            self.cluster[0].address, connect_timeout=4).connect()
        self.client2 = yield ZookeeperClient(
            self.cluster[1].address).connect()

        exists_d, watch_d = self.client.exists_and_watch("/hello")
        yield exists_d
        yield self.client2.create("/hello", "world")
        yield self.client2.close()

        self.cluster[0].stop()
        yield self.sleep(1)

        # Try to do something with the connection while it's down.
        ops = []
        ops.append(self.client.create('/abc', 'test'))
        ops.append(self.client.get("/hello"))
        ops.append(self.client.get_children("/"))
        ops.append(self.client.set("/hello", "sad"))

        # Sleep and let the session expire, and ensure we're down long
        # enough for backoff to trigger.
        yield self.sleep(10)

        # Start the cluster and watch things work.
        self.cluster[0].run()
        yield DeferredList(ops, fireOnOneErrback=True, consumeErrors=True)
        yield watch_d

        # Verify we backed off at least once.
        self.assertIn("Backing off reconnect", output.getvalue())
        # Verify we only reconnected once.
        self.assertEqual(
            output.getvalue().count("Restablished connection"), 1)

    test_managed_client_backoff.timeout = 25
class WatchDeliveryConnectionFailedTest(ZookeeperTestCase):
    """Watches are still sent on reconnect."""

    def setUp(self):
        super(WatchDeliveryConnectionFailedTest, self).setUp()
        self.proxy = ProxyFactory("127.0.0.1", 2181)
        self.proxy_port = reactor.listenTCP(0, self.proxy)
        host = self.proxy_port.getHost()
        self.proxied_client = ZookeeperClient(
            "%s:%s" % (host.host, host.port))
        self.direct_client = ZookeeperClient("127.0.0.1:2181", 3000)
        self.session_events = []

        def session_event_collector(conn, event):
            self.session_events.append(event)

        self.proxied_client.set_session_callback(session_event_collector)
        return self.direct_client.connect()

    @inlineCallbacks
    def tearDown(self):
        zookeeper.set_debug_level(0)
        if self.proxied_client.connected:
            yield self.proxied_client.close()
        if not self.direct_client.connected:
            yield self.direct_client.connect()
        utils.deleteTree(handle=self.direct_client.handle)
        yield self.direct_client.close()
        self.proxy.lose_connection()
        yield self.proxy_port.stopListening()

    def verify_events(self, events, expected):
        """Verify the state of the session events encountered."""
        for value, state in zip([e.state_name for e in events], expected):
            self.assertEqual(value, state)

    @inlineCallbacks
    def test_child_watch_fires_upon_reconnect(self):
        yield self.proxied_client.connect()

        # Set up the tree.
        cpath = "/test-tree"
        yield self.direct_client.create(cpath)

        # Set up the watch.
        child_d, watch_d = self.proxied_client.get_children_and_watch(cpath)
        self.assertEqual((yield child_d), [])

        # Kill the connection and fire the watch.
        self.proxy.lose_connection()
        yield self.direct_client.create(
            cpath + "/abc", flags=zookeeper.SEQUENCE)

        # We should still get the child event.
        yield watch_d

        # We get two pairs of (connecting, connected) for the conn and watch.
        self.assertEqual(len(self.session_events), 4)
        self.verify_events(
            self.session_events,
            ("connecting", "connecting", "connected", "connected"))

    @inlineCallbacks
    def test_exists_watch_fires_upon_reconnect(self):
        yield self.proxied_client.connect()
        cpath = "/test"

        # Set up the watch.
        exists_d, watch_d = self.proxied_client.exists_and_watch(cpath)
        self.assertEqual((yield exists_d), None)

        # Kill the connection and fire the watch.
        self.proxy.lose_connection()
        yield self.direct_client.create(cpath)

        # We should still get the exists event.
        yield watch_d

        # We get two pairs of (connecting, connected) for the conn and watch.
        self.assertEqual(len(self.session_events), 4)
        self.verify_events(
            self.session_events,
            ("connecting", "connecting", "connected", "connected"))

    @inlineCallbacks
    def test_get_watch_fires_upon_reconnect(self):
        yield self.proxied_client.connect()

        # Set up the tree.
        cpath = "/test"
        yield self.direct_client.create(cpath, "abc")

        # Set up the watch.
        get_d, watch_d = self.proxied_client.get_and_watch(cpath)
        content, stat = yield get_d
        self.assertEqual(content, "abc")

        # Kill the connection and fire the watch.
        self.proxy.lose_connection()
        yield self.direct_client.set(cpath, "xyz")

        # We should still get the changed event.
        yield watch_d

        # We also get two pairs of (connecting, connected) for the conn
        # and watch.
        self.assertEqual(len(self.session_events), 4)
        self.verify_events(
            self.session_events,
            ("connecting", "connecting", "connected", "connected"))

    @inlineCallbacks
    def test_watch_delivery_failure_resends(self):
        """Simulate a network failure for the watch delivery.

        The zk server effectively sends the watch delivery to the client,
        but the client never receives it.
        """
        yield self.proxied_client.connect()
        cpath = "/test"

        # Set up the watch.
        exists_d, watch_d = self.proxied_client.exists_and_watch(cpath)
        self.assertEqual((yield exists_d), None)

        # Pause the connection, fire the watch, and blackhole the data.
        self.proxy.set_blocked(True)
        yield self.direct_client.create(cpath)
        self.proxy.set_blocked(False)
        self.proxy.lose_connection()

        # We should still get the exists event.
        yield watch_d

    @inlineCallbacks
    def xtest_binding_bug_session_exception(self):
        """This test triggers an exception in the python-zookeeper binding.

        File "txzookeeper/client.py", line 491, in create
            self.handle, path, data, acls, flags, callback)
        exceptions.SystemError: error return without exception set
        """
        yield self.proxied_client.connect()
        data_d, watch_d = yield self.proxied_client.exists_and_watch("/")
        self.assertTrue((yield data_d))

        self.proxy.set_blocked(True)
        # Wait for session expiration; on a single server the options are
        # limited.
        yield self.sleep(15)

        # Unblock the proxy for the next connect, then drop the connection.
        self.proxy.set_blocked(False)
        self.proxy.lose_connection()

        # Wait for a reconnect.
        yield self.assertFailure(watch_d, zookeeper.SessionExpiredException)

        # Leads to the binding's bug failure.
        yield self.assertFailure(
            self.proxied_client.get("/a"),
            zookeeper.SessionExpiredException)

        self.assertEqual(self.session_events[-1].state_name, "expired")
class NodeTest(TestCase):

    def setUp(self):
        super(NodeTest, self).setUp()
        zookeeper.set_debug_level(zookeeper.LOG_LEVEL_ERROR)
        self.client = ZookeeperClient("127.0.0.1:2181", 2000)
        d = self.client.connect()
        self.client2 = None

        def create_zoo(client):
            return client.create("/zoo")

        d.addCallback(create_zoo)
        return d

    def tearDown(self):
        super(NodeTest, self).tearDown()
        deleteTree(handle=self.client.handle)
        if self.client.connected:
            self.client.close()
        if self.client2 and self.client2.connected:
            self.client2.close()
        zookeeper.set_debug_level(zookeeper.LOG_LEVEL_DEBUG)

    def _make_digest_identity(self, credentials):
        user, password = credentials.split(":")
        digest = hashlib.new("sha1", credentials).digest()
        return "%s:%s" % (user, base64.b64encode(digest))

    def test_node_name_and_path(self):
        """Each node has a name and a path."""
        node = ZNode("/zoo/rabbit", self.client)
        self.assertEqual(node.name, "rabbit")
        self.assertEqual(node.path, "/zoo/rabbit")

    def test_node_event_repr(self):
        """Node events have a human-readable representation."""
        node = ZNode("/zoo", self.client)
        event = NodeEvent(4, None, node)
        self.assertEqual(repr(event), "<NodeEvent child at '/zoo'>")

    @inlineCallbacks
    def test_node_exists_nonexistant(self):
        """A node knows whether it exists or not."""
        node = ZNode("/zoo/rabbit", self.client)
        exists = yield node.exists()
        self.assertFalse(exists)

    @inlineCallbacks
    def test_node_set_data_on_nonexistant(self):
        """Setting data on a nonexistent node raises a no-node exception."""
        node = ZNode("/zoo/rabbit", self.client)
        d = node.set_data("big furry ears")
        self.failUnlessFailure(d, zookeeper.NoNodeException)
        yield d

    @inlineCallbacks
    def test_node_create_set_data(self):
        """A node can be created and have its data set."""
        node = ZNode("/zoo/rabbit", self.client)
        data = "big furry ears"
        yield node.create(data)
        exists = yield self.client.exists("/zoo/rabbit")
        self.assertTrue(exists)
        node_data = yield node.get_data()
        self.assertEqual(data, node_data)
        data = data * 2
        yield node.set_data(data)
        node_data = yield node.get_data()
        self.assertEqual(data, node_data)

    @inlineCallbacks
    def test_node_get_data(self):
        """Data can be fetched from a node."""
        yield self.client.create("/zoo/giraffe", "mouse")
        data = yield ZNode("/zoo/giraffe", self.client).get_data()
        self.assertEqual(data, "mouse")

    @inlineCallbacks
    def test_node_get_data_nonexistant(self):
        """Attempting to fetch data from a nonexistent node returns a
        no-node error.
        """
        d = ZNode("/zoo/giraffe", self.client).get_data()
        self.failUnlessFailure(d, zookeeper.NoNodeException)
        yield d

    @inlineCallbacks
    def test_node_get_acl(self):
        """The ACL for a node can be retrieved."""
        yield self.client.create("/zoo/giraffe")
        acl = yield ZNode("/zoo/giraffe", self.client).get_acl()
        self.assertEqual(len(acl), 1)
        self.assertEqual(acl[0]['scheme'], 'world')

    def test_node_get_acl_nonexistant(self):
        """Fetching the ACL for a nonexistent node results in an error."""
        node = ZNode("/zoo/giraffe", self.client)

        def assert_failed(failed):
            if not isinstance(failed, Failure):
                self.fail("Should have failed")
            self.assertTrue(
                isinstance(failed.value, zookeeper.NoNodeException))

        d = node.get_acl()
        d.addBoth(assert_failed)
        return d

    @inlineCallbacks
    def test_node_set_acl(self):
        """The ACL for a node can be modified."""
        path = yield self.client.create("/zoo/giraffe")
        credentials = "zebra:moon"
        acl = [{"id": self._make_digest_identity(credentials),
                "scheme": "digest",
                "perms": zookeeper.PERM_ALL}]
        node = ZNode(path, self.client)
        # Little hack around slow auth (zookeeper issue 770).
        d = self.client.add_auth("digest", credentials)
        yield node.set_acl(acl)
        yield d
        node_acl, stat = yield self.client.get_acl(path)
        self.assertEqual(node_acl, acl)

    @inlineCallbacks
    def test_node_set_data_update_with_cached_exists(self):
        """Data can be set on an existing node, updating it in place."""
        node = ZNode("/zoo/monkey", self.client)
        yield self.client.create("/zoo/monkey", "stripes")
        exists = yield node.exists()
        self.assertTrue(exists)
        yield node.set_data("banana")
        data, stat = yield self.client.get("/zoo/monkey")
        self.assertEqual(data, "banana")

    @inlineCallbacks
    def test_node_set_data_update_with_invalid_cached_exists(self):
        """If a node is deleted, attempting to set data on it raises a
        no-node exception.
        """
        node = ZNode("/zoo/monkey", self.client)
        yield self.client.create("/zoo/monkey", "stripes")
        exists = yield node.exists()
        self.assertTrue(exists)
        yield self.client.delete("/zoo/monkey")
        d = node.set_data("banana")
        self.failUnlessFailure(d, zookeeper.NoNodeException)
        yield d

    @inlineCallbacks
    def test_node_set_data_update_with_exists(self):
        """Data can be set on an existing node, updating it in place."""
        node = ZNode("/zoo/monkey", self.client)
        yield self.client.create("/zoo/monkey", "stripes")
        yield node.set_data("banana")
        data, stat = yield self.client.get("/zoo/monkey")
        self.assertEqual(data, "banana")

    @inlineCallbacks
    def test_node_exists_with_watch_nonexistant(self):
        """A node's existence can be checked with the exists_and_watch API.

        A deferred is returned, and any node-level event (created, deleted,
        modified) invokes the callback. Create event callbacks also fire
        for nodes that did not exist when the watch was set.
        """
        node = ZNode("/zoo/elephant", self.client)
        exists, watch = yield node.exists_and_watch()
        self.assertFalse((yield exists))
        yield self.client.create("/zoo/elephant")
        event = yield watch
        self.assertEqual(event.type, zookeeper.CREATED_EVENT)
        self.assertEqual(event.path, node.path)

    @inlineCallbacks
    def test_node_get_data_with_watch_on_update(self):
        """Subscribing to a node will get node update events."""
        yield self.client.create("/zoo/elephant")
        node = ZNode("/zoo/elephant", self.client)
        data, watch = yield node.get_data_and_watch()
        yield self.client.set("/zoo/elephant")
        event = yield watch
        self.assertEqual(event.type, zookeeper.CHANGED_EVENT)
        self.assertEqual(event.path, "/zoo/elephant")

    @inlineCallbacks
    def test_node_get_data_with_watch_on_delete(self):
        """Subscribing to a node will get node deletion events."""
        yield self.client.create("/zoo/elephant")
        node = ZNode("/zoo/elephant", self.client)
        data, watch = yield node.get_data_and_watch()
        yield self.client.delete("/zoo/elephant")
        event = yield watch
        self.assertEqual(event.type, zookeeper.DELETED_EVENT)
        self.assertEqual(event.path, "/zoo/elephant")

    @inlineCallbacks
    def test_node_children(self):
        """A node's children can be introspected."""
        node = ZNode("/zoo", self.client)
        node_path_a = yield self.client.create("/zoo/lion")
        node_path_b = yield self.client.create("/zoo/tiger")
        children = yield node.get_children()
        children.sort()
        self.assertEqual(children[0].path, node_path_a)
        self.assertEqual(children[1].path, node_path_b)

    @inlineCallbacks
    def test_node_children_by_prefix(self):
        """A node's children can be introspected, optionally with a
        prefix.
        """
        node = ZNode("/zoo", self.client)
        node_path_a = yield self.client.create("/zoo/lion")
        yield self.client.create("/zoo/tiger")
        children = yield node.get_children("lion")
        children.sort()
        self.assertEqual(children[0].path, node_path_a)
        self.assertEqual(len(children), 1)

    @inlineCallbacks
    def test_node_get_children_with_watch_create(self):
        """A node's children can be watched explicitly, giving existence
        events for child creation and destruction.
        """
        node = ZNode("/zoo", self.client)
        children, watch = yield node.get_children_and_watch()
        yield self.client.create("/zoo/lion")
        event = yield watch
        self.assertEqual(event.path, "/zoo")
        self.assertEqual(event.type, zookeeper.CHILD_EVENT)
        self.assertEqual(event.type_name, "child")

    @inlineCallbacks
    def test_node_get_children_with_watch_delete(self):
        """A node's children can be watched explicitly, giving existence
        events for child creation and destruction.
        """
        node = ZNode("/zoo", self.client)
        yield self.client.create("/zoo/lion")
        children, watch = yield node.get_children_and_watch()
        yield self.client.delete("/zoo/lion")
        event = yield watch
        self.assertEqual(event.path, "/zoo")
        self.assertEqual(event.type, zookeeper.CHILD_EVENT)

    @inlineCallbacks
    def test_bad_version_error(self):
        """The node captures the node version on any read operation and
        uses it for write operations. On a concurrent-modification error
        the node returns a bad-version error; this also clears the cached
        state, so subsequent modifications are made against the latest
        version, unless the cache is seeded again by a read operation.
        """
        node = ZNode("/zoo/lion", self.client)
        self.client2 = ZookeeperClient("127.0.0.1:2181")
        yield self.client2.connect()

        yield self.client.create("/zoo/lion", "mouse")
        yield node.get_data()
        yield self.client2.set("/zoo/lion", "den2")
        data = yield self.client.exists("/zoo/lion")
        self.assertEqual(data['version'], 1)

        d = node.set_data("zebra")
        self.failUnlessFailure(d, zookeeper.BadVersionException)
        yield d

        # After the failure the cached version is cleared, and a set
        # proceeds.
        yield node.set_data("zebra")
        data = yield node.get_data()
        self.assertEqual(data, "zebra")
class YAMLStateTest(TestCase):

    @inlineCallbacks
    def setUp(self):
        zookeeper.set_debug_level(0)
        self.client = ZookeeperClient(get_test_zookeeper_address())
        yield self.client.connect()
        self.path = "/zoo"

    @inlineCallbacks
    def tearDown(self):
        exists = yield self.client.exists(self.path)
        if exists:
            yield remove_tree(self.client, self.path)

    @inlineCallbacks
    def test_get_empty(self):
        """Verify getting an empty node works as expected."""
        path = yield self.client.create(self.path)
        node = YAMLState(self.client, path)
        self.assertEqual(node, {})

    @inlineCallbacks
    def test_access_wo_create(self):
        """Verify accessing data for a nonexistent node works as expected."""
        node = YAMLState(self.client, self.path)
        yield node.read()
        self.assertEqual(node, {})

    def test_set_wo_read(self):
        """Verify that not calling read before mutation raises."""
        node = YAMLState(self.client, self.path)
        self.assertRaises(ValueError, node.__setitem__, "alpha", "beta")
        self.assertRaises(ValueError, node.update, {"alpha": "beta"})

    @inlineCallbacks
    def test_set_wo_write(self):
        """Check that get resolves from the internal write buffer.

        set/get pairs without a write should present a view of the state
        reflecting the local change. Verify that without a write the local
        data appears on subsequent calls, but that the zk state hasn't been
        changed.
        """
        path = yield self.client.create(self.path)
        node = YAMLState(self.client, path)
        yield node.read()

        options = dict(alpha="beta", one=1)
        node.update(options)
        self.assertEqual(node, options)

        zk_data, stat = yield self.client.get(self.path)
        # The node isn't created yet in zk.
        self.assertEqual(zk_data, "")

    @inlineCallbacks
    def test_set_w_write(self):
        """Verify that write updates the local and zk state.

        When write is called, we expect the zk state to reflect this. We
        also expect subsequent calls to get to reflect that state.
        """
        node = YAMLState(self.client, self.path)
        yield node.read()

        options = dict(alpha="beta", one=1)
        node.update(options)
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem(key='alpha', new='beta'),
                 AddedItem(key='one', new=1)]))

        # A local get should reflect proper data.
        self.assertEqual(node, options)

        # And a direct look at zk should work as well.
        zk_data, stat = yield self.client.get(self.path)
        zk_data = yaml.load(zk_data)
        self.assertEqual(zk_data, options)

    @inlineCallbacks
    def test_conflict_on_set(self):
        """Version conflict error tests.

        Test that two YAMLState objects writing to the same path can and
        will throw version errors when their reads become stale.
        """
        node = YAMLState(self.client, self.path)
        node2 = YAMLState(self.client, self.path)

        yield node.read()
        yield node2.read()

        options = dict(alpha="beta", one=1)
        node.update(options)
        yield node.write()

        node2.update(options)
        changes = yield node2.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("one", 1)]))

        # First read node2.
        self.assertEqual(node, options)

        # Write on node 1.
        options2 = dict(alpha="gamma", one="two")
        node.update(options2)
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([ModifiedItem("alpha", "beta", "gamma"),
                 ModifiedItem("one", 1, "two")]))

        # Verify that node 1 reports as expected.
        self.assertEqual(node, options2)

        # Verify that node2 still has the older data.
        self.assertEqual(node2, options)

        # Now issue a set/write from node2. This will merge the data,
        # deleting 'one' and updating the other values.
        options3 = dict(alpha="cappa", new="next")
        node2.update(options3)
        del node2["one"]

        expected = dict(alpha="cappa", new="next")
        changes = yield node2.write()
        self.assertEqual(
            set(changes),
            set([DeletedItem("one", 1),
                 ModifiedItem("alpha", "beta", "cappa"),
                 AddedItem("new", "next")]))
        self.assertEqual(expected, node2)

        # But node still reflects the old data.
        self.assertEqual(node, options2)

    @inlineCallbacks
    def test_setitem(self):
        node = YAMLState(self.client, self.path)
        yield node.read()

        options = dict(alpha="beta", one=1)
        node["alpha"] = "beta"
        node["one"] = 1
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("one", 1)]))

        # A local get should reflect proper data.
        self.assertEqual(node, options)

        # And a direct look at zk should work as well.
        zk_data, stat = yield self.client.get(self.path)
        zk_data = yaml.load(zk_data)
        self.assertEqual(zk_data, options)

    @inlineCallbacks
    def test_multiple_reads(self):
        """Calling read resets state to ZK after multiple round-trips."""
        node = YAMLState(self.client, self.path)
        yield node.read()

        node.update({"alpha": "beta", "foo": "bar"})
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "bar")

        yield node.read()

        # A read resets the data to the empty state.
        self.assertEqual(node, {})

        node.update({"alpha": "beta", "foo": "bar"})
        changes = yield node.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("alpha", "beta"), AddedItem("foo", "bar")]))

        # A write retains the newly set values.
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "bar")

        # Now get another state instance and change the zk state.
        node2 = YAMLState(self.client, self.path)
        yield node2.read()
        node2.update({"foo": "different"})
        changes = yield node2.write()
        self.assertEqual(changes, [ModifiedItem("foo", "bar", "different")])

        # This should pull in the new state (and still have the merged
        # old data).
        yield node.read()
        self.assertEqual(node["alpha"], "beta")
        self.assertEqual(node["foo"], "different")

    @inlineCallbacks
    def test_dictmixin_usage(self):
        """Verify that the majority of dict operations function."""
        node = YAMLState(self.client, self.path)
        yield node.read()

        node.update({"alpha": "beta", "foo": "bar"})
        self.assertEqual(node, {"alpha": "beta", "foo": "bar"})

        result = node.pop("foo")
        self.assertEqual(result, "bar")
        self.assertEqual(node, {"alpha": "beta"})

        node["delta"] = "gamma"
        self.assertEqual(set(node.keys()), set(("alpha", "delta")))

        result = list(node.iteritems())
        self.assertIn(("alpha", "beta"), result)
        self.assertIn(("delta", "gamma"), result)

    @inlineCallbacks
    def test_del_empties_state(self):
        d = YAMLState(self.client, self.path)
        yield d.read()

        d["a"] = "foo"
        changes = yield d.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])

        del d["a"]
        changes = yield d.write()
        self.assertEqual(changes, [DeletedItem("a", "foo")])
        self.assertEqual(d, {})

    @inlineCallbacks
    def test_read_resync(self):
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1["a"] = "foo"
        changes = yield d1.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])

        d2 = YAMLState(self.client, self.path)
        yield d2.read()
        del d2["a"]
        changes = yield d2.write()
        self.assertEqual(changes, [DeletedItem("a", "foo")])
        d2["a"] = "bar"
        changes = yield d2.write()
        self.assertEqual(changes, [AddedItem("a", "bar")])

        zk_data, stat = yield self.client.get(self.path)
        yield d1.read()
        # d1 should pick up the new value (from d2) on a read.
        zk_data, stat = yield self.client.get(self.path)
        self.assertEqual(d1["a"], "bar")

    @inlineCallbacks
    def test_multiple_writes(self):
        d1 = YAMLState(self.client, self.path)
        yield d1.read()

        d1.update(dict(foo="bar", this="that"))
        changes = yield d1.write()
        self.assertEqual(
            set(changes),
            set([AddedItem("foo", "bar"), AddedItem("this", "that")]))

        del d1["this"]
        d1["another"] = "value"

        changes = yield d1.write()
        self.assertEqual(
            set(changes),
            set([DeletedItem("this", "that"),
                 AddedItem("another", "value")]))

        expected = {"foo": "bar", "another": "value"}
        self.assertEqual(d1, expected)

        changes = yield d1.write()
        self.assertEqual(changes, [])
        self.assertEqual(d1, expected)

        yield d1.read()
        self.assertEqual(d1, expected)

        # This shouldn't write any changes.
        changes = yield d1.write()
        self.assertEqual(changes, [])
        self.assertEqual(d1, expected)

    @inlineCallbacks
    def test_write_twice(self):
        d1 = YAMLState(self.client, self.path)
        yield d1.read()
        d1["a"] = "foo"
        changes = yield d1.write()
        self.assertEqual(changes, [AddedItem("a", "foo")])

        d2 = YAMLState(self.client, self.path)
        yield d2.read()
        d2["a"] = "bar"
        changes = yield d2.write()
        self.assertEqual(changes, [ModifiedItem("a", "foo", "bar")])

        # Shouldn't write again. Changes were already flushed and acted
        # upon by other parties.
        changes = yield d1.write()
        self.assertEqual(changes, [])

        yield d1.read()
        self.assertEquals(d1, d2)

    @inlineCallbacks
    def test_read_requires_node(self):
        """Validate that read raises when required=True."""
        d1 = YAMLState(self.client, self.path)
        yield self.assertFailure(d1.read(True), StateNotFound)
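
# A short sketch (not part of the test suite) of the read/update/write cycle
# tested above: read() must precede mutation, mutations are buffered locally,
# and write() flushes them to zookeeper, returning a list of AddedItem /
# ModifiedItem / DeletedItem changes. The path default is a hypothetical
# placeholder.
from twisted.internet.defer import inlineCallbacks, returnValue


@inlineCallbacks
def yamlstate_update_sketch(client, path="/example-state"):
    state = YAMLState(client, path)
    yield state.read()               # mutating before read() raises ValueError
    state.update({"alpha": "beta"})  # buffered locally, not yet in zookeeper
    changes = yield state.write()    # flush; concurrent writes are merged
    returnValue(changes)
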
class RetryChangeTest(ZookeeperTestCase):

    def update_function_increment(self, content, stat):
        if not content:
            return str(0)
        return str(int(content) + 1)

    def setUp(self):
        super(RetryChangeTest, self).setUp()
        self.client = ZookeeperClient("127.0.0.1:2181")
        return self.client.connect()

    def tearDown(self):
        utils.deleteTree("/", self.client.handle)
        self.client.close()

    @inlineCallbacks
    def test_node_create(self):
        """retry_change will create a node if one does not exist."""
        # Use a mock to ensure the change function is only invoked once.
        func = self.mocker.mock()
        func(None, None)
        self.mocker.result("hello")
        self.mocker.replay()

        yield retry_change(self.client, "/magic-beans", func)

        content, stat = yield self.client.get("/magic-beans")
        self.assertEqual(content, "hello")
        self.assertEqual(stat["version"], 0)

    @inlineCallbacks
    def test_node_update(self):
        """retry_change will update an existing node."""
        # Use a mock to ensure the change function is only invoked once.
        func = self.mocker.mock()
        func("", MATCH_STAT)
        self.mocker.result("hello")
        self.mocker.replay()

        yield self.client.create("/magic-beans")
        yield retry_change(self.client, "/magic-beans", func)

        content, stat = yield self.client.get("/magic-beans")
        self.assertEqual(content, "hello")
        self.assertEqual(stat["version"], 1)

    def test_error_in_change_function_propogates(self):
        """An error in the change function propagates to the caller."""

        def error_function(content, stat):
            raise SyntaxError()

        d = retry_change(self.client, "/magic-beans", error_function)
        self.failUnlessFailure(d, SyntaxError)
        return d

    @inlineCallbacks
    def test_concurrent_update_bad_version(self):
        """If the node is updated after the retry function has read the
        node, but before the content is set, the retry function will
        perform another read/change_func/set cycle.
        """
        yield self.client.create("/animals")
        content, stat = yield self.client.get("/animals")
        yield self.client.set("/animals", "5")

        real_get = self.client.get
        p_client = self.mocker.proxy(self.client)
        p_client.get("/animals")
        self.mocker.result(succeed((content, stat)))
        p_client.get("/animals")
        self.mocker.call(real_get)
        self.mocker.replay()

        yield retry_change(
            p_client, "/animals", self.update_function_increment)

        content, stat = yield real_get("/animals")
        self.assertEqual(content, "6")
        self.assertEqual(stat["version"], 2)

    @inlineCallbacks
    def test_create_node_exists(self):
        """If the node is created after the retry function has determined
        that the node doesn't exist, but before the node is created by the
        retry function, the retry function will perform another
        read/change_func/set cycle.
        """
        yield self.client.create("/animals", "5")

        real_get = self.client.get
        p_client = self.mocker.patch(self.client)
        p_client.get("/animals")
        self.mocker.result(fail(zookeeper.NoNodeException()))
        p_client.get("/animals")
        self.mocker.call(real_get)
        self.mocker.replay()

        yield retry_change(
            p_client, "/animals", self.update_function_increment)

        content, stat = yield real_get("/animals")
        self.assertEqual(content, "6")
        self.assertEqual(stat["version"], 1)

    @inlineCallbacks
    def test_set_node_does_not_exist(self):
        """If the retry function goes to update a node which has been
        deleted since it was read, it will cycle through to another
        read/change_func/set cycle.
        """
        real_get = self.client.get
        p_client = self.mocker.patch(self.client)
        p_client.get("/animals")
        self.mocker.result(succeed(("5", {"version": 1})))
        p_client.get("/animals")
        self.mocker.call(real_get)
        self.mocker.replay()

        yield retry_change(
            p_client, "/animals", self.update_function_increment)

        content, stat = yield real_get("/animals")
        self.assertEqual(content, "0")
        self.assertEqual(stat["version"], 0)

    @inlineCallbacks
    def test_identical_content_noop(self):
        """If the change function generates content identical to the
        existing node, retry_change exits without modifying the node.
        """
        yield self.client.create("/animals", "hello")

        def update(content, stat):
            return content

        yield retry_change(self.client, "/animals", update)

        content, stat = yield self.client.get("/animals")
        self.assertEqual(content, "hello")
        self.assertEqual(stat["version"], 0)