Example #1
class RemoveTreeTest(TestCase):
    @inlineCallbacks
    def setUp(self):
        yield super(RemoveTreeTest, self).setUp()
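        # Silence the underlying libzookeeper client's log output for
        # the tests.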
        zookeeper.set_debug_level(0)
        self.client = ZookeeperClient(get_test_zookeeper_address())
        yield self.client.connect()

    @inlineCallbacks
    def test_remove_tree(self):
        yield self.client.create("/zoo")
        yield self.client.create("/zoo/mammals")
        yield self.client.create("/zoo/mammals/elephant")
        yield self.client.create("/zoo/reptiles")
        yield self.client.create("/zoo/reptiles/snake")

        yield remove_tree(self.client, "/zoo")

        children = yield self.client.get_children("/")
        self.assertNotIn("zoo", children)
Example #2
class CharmPublisherTest(RepositoryTestBase):

    @inlineCallbacks
    def setUp(self):
        super(CharmPublisherTest, self).setUp()
        zookeeper.set_debug_level(0)

        self.charm = CharmDirectory(self.sample_dir1)
        self.charm_id = local_charm_id(self.charm)
        self.charm_key = under.quote(self.charm_id)
        # provider storage key
        self.charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, self.charm.get_sha256()))

        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.storage_dir = self.makeDir()
        self.storage = FileStorage(self.storage_dir)
        self.publisher = CharmPublisher(self.client, self.storage)

        yield self.client.connect()
        yield self.client.create("/charms")

    def tearDown(self):
        deleteTree("/", self.client.handle)
        self.client.close()
        super(CharmPublisherTest, self).tearDown()

    @inlineCallbacks
    def test_add_charm_and_publish(self):
        open_file_count = _count_open_files()
        yield self.publisher.add_charm(self.charm_id, self.charm)
        result = yield self.publisher.publish()
        self.assertEqual(_count_open_files(), open_file_count)

        children = yield self.client.get_children("/charms")
        self.assertEqual(children, [self.charm_key])
        fh = yield self.storage.get(self.charm_storage_key)
        bundle = CharmBundle(fh)
        self.assertEqual(self.charm.get_sha256(), bundle.get_sha256())

        self.assertEqual(
            result[0].bundle_url, "file://%s/%s" % (
                self.storage_dir, self.charm_storage_key))

    @inlineCallbacks
    def test_published_charm_sans_unicode(self):
        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.publisher.publish()
        data, stat = yield self.client.get("/charms/%s" % self.charm_key)
        self.assertNotIn("unicode", data)

    @inlineCallbacks
    def test_add_charm_with_concurrent(self):
        """
        Publishing a charm that has concurrently been published after
        add_charm works fine: it will write to storage regardless. Using
        the sha256 as part of the storage key helps ensure the
        uniqueness of the stored bits. The sha256 is also stored with
        the charm state.

        This relation between the charm state and the binary bits helps
        guarantee that any published charm in zookeeper will use the
        binary bits it was published with.
        """

        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(
            self.client, self.storage)

        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        yield self.publisher.publish()

        # modify the charm to create a conflict scenario
        self.makeFile("zebra",
                      path=os.path.join(self.sample_dir1, "junk.txt"))

        # assert the charm now has a different sha post modification
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(
            modified_charm_sha,
            self.charm.get_sha256())

        # verify publishing raises a StateChanged error
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised state error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # verify the zk state
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get(
            "/charms/%s" % charm_nodes[0])

        # assert the checksum matches the initially published checksum
        self.assertEqual(
            yaml.load(content)["sha256"], self.charm.get_sha256())

        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # and the modified binary bits were stored as well
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(
            self.storage_dir, modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))

    @inlineCallbacks
    def test_add_charm_with_concurrent_removal(self):
        """
        If, when a charm is published, the publisher detects that the
        charm already exists, it will attempt to retrieve the charm
        state to verify there is no checksum mismatch. If the charm is
        concurrently removed, the publisher should fail with a
        StateChanged error.
        """
        manager = self.mocker.patch(CharmStateManager)

        manager.get_charm_state(self.charm_id)
        self.mocker.passthrough()

        def match_charm_bundle(bundle):
            return isinstance(bundle, CharmBundle)

        def match_charm_url(url):
            return url.startswith("file://")

        manager.add_charm_state(
            self.charm_id, MATCH(match_charm_bundle), MATCH(match_charm_url))
        self.mocker.result(fail(zookeeper.NodeExistsException()))

        manager.get_charm_state(self.charm_id)
        self.mocker.result(fail(zookeeper.NoNodeException()))
        self.mocker.replay()

        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.failUnlessFailure(self.publisher.publish(), StateChanged)

    @inlineCallbacks
    def test_add_charm_already_known(self):
        """Adding an existing charm, is an effective noop, as its not added
        to the internal publisher queue.
        """
        # Do an initial publishing of the charm
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)
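
The concurrent-removal test above implies publish logic that reconciles a
NodeExistsException against the stored charm state. A hedged sketch of that
reconciliation follows; the helper names and the StateChanged constructor
arguments are assumptions, and only the control flow mirrors the docstrings:

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def publish_one_sketch(state_manager, charm_id, bundle, url):
    # Hypothetical names; only the control flow mirrors the tests above.
    try:
        yield state_manager.add_charm_state(charm_id, bundle, url)
    except zookeeper.NodeExistsException:
        # Someone published concurrently; fetch the existing state and
        # verify its checksum matches the bits we were about to write.
        try:
            charm_state = yield state_manager.get_charm_state(charm_id)
        except zookeeper.NoNodeException:
            # The charm was removed between the two calls.
            raise StateChanged("charm removed during publish")
        if charm_state.get_sha256() != bundle.get_sha256():
            raise StateChanged("checksum mismatch for %s" % charm_id)
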
Example #3
class ClientSessionTests(ZookeeperTestCase):

    def setUp(self):
        super(ClientSessionTests, self).setUp()
        self.cluster.start()
        self.client = None
        self.client2 = None
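        # Connect to servers in the order listed rather than randomly,
        # so the tests know which cluster member the client is attached to.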
        zookeeper.deterministic_conn_order(True)
        zookeeper.set_debug_level(0)

    @property
    def cluster(self):
        return CLUSTER

    def tearDown(self):
        super(ClientSessionTests, self).tearDown()
        if self.client:
            self.client.close()
        self.cluster.reset()

    @inlineCallbacks
    def test_client_session_migration(self):
        """A client will automatically rotate servers to ensure a connection.

        A client connected to multiple servers will transparently
        migrate amongst them as individual servers become unreachable.
        A client's session will be maintained.
        """
        # Connect to the Zookeeper Cluster
        servers = ",".join([s.address for s in self.cluster])
        self.client = ZookeeperClient(servers)
        yield self.client.connect()
        yield self.client.create("/hello", flags=zookeeper.EPHEMERAL)

        # Shut down the server the client is connected to
        self.cluster[0].stop()

        # Wait for the shutdown and cycle; if we don't wait we'll
        # occasionally get a zookeeper connection-loss exception.
        yield self.sleep(0.1)

        self.assertTrue(self.client.connected)
        exists = yield self.client.exists("/hello")
        self.assertTrue(exists)

    @inlineCallbacks
    def test_client_watch_migration(self):
        """On server rotation, extant watches are still active.

        A client connected to multiple servers will transparently
        migrate amongst them as individual servers become unreachable.
        Watch deferreds issued from the same client instance will
        continue to function as the session is maintained.
        """
        session_events = []

        def session_event_callback(connection, e):
            session_events.append(e)

        # Connect to the Zookeeper Cluster
        servers = ",".join([s.address for s in self.cluster])
        self.client = ZookeeperClient(servers)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        # Set up a watch
        yield self.client.create("/hello")
        exists_d, watch_d = self.client.exists_and_watch("/hello")
        yield exists_d

        # Shut down the server the client is connected to
        self.cluster[0].stop()

        # Wait for the shutdown and cycle; if we don't wait we'll
        # occasionally get a zookeeper connection-loss exception.
        yield self.sleep(0.1)

        # The session events that would have been ignored are sent
        # to the session event callback.
        self.assertTrue(session_events)
        self.assertTrue(self.client.connected)

        # If we delete the node, we'll see the watch fire.
        yield self.client.delete("/hello")
        event = yield watch_d
        self.assertEqual(event.type_name, "deleted")
        self.assertEqual(event.path, "/hello")

    @inlineCallbacks
    def test_connection_error_handler(self):
        """A callback can be specified for connection errors.

        We can specify a callback for connection errors that can
        perform recovery for a disconnected client, re-establishing
        the connection.
        """
        @inlineCallbacks
        def connection_error_handler(connection, error):
            # Management of this connection attribute was moved out of
            # the default behavior for a connection exception in order
            # to support the retry facade. Under the hood, libzookeeper
            # will be trying to reconnect transparently.
            connection.connected = False

            # On loss of the connection, reconnect the client w/ same session.

            yield connection.connect(
                self.cluster[1].address, client_id=connection.client_id)
            returnValue(23)

        self.client = ZookeeperClient(self.cluster[0].address)
        self.client.set_connection_error_callback(connection_error_handler)
        yield self.client.connect()

        yield self.client.create("/hello")
        exists_d, watch_d = self.client.exists_and_watch("/hello")
        yield exists_d

        # Shut down the server the client is connected to
        self.cluster[0].stop()
        yield self.sleep(0.1)

        # Results in a connection-loss exception and invocation of the
        # error handler.
        result = yield self.client.exists("/hello")

        # The result of the error handler is returned to the API caller
        self.assertEqual(result, 23)

        exists = yield self.client.exists("/hello")
        self.assertTrue(exists)

    @inlineCallbacks
    def test_client_session_expiration_event(self):
        """A client which recieves a session expiration event.
        """
        session_events = []
        events_received = Deferred()

        def session_event_callback(connection, e):
            session_events.append(e)
            if len(session_events) == 8:
                events_received.callback(True)

        # Connect to a node in the cluster and establish a watch
        self.client = ZookeeperClient(self.cluster[0].address)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        # Set up some watches to verify they are cleaned out on expiration.
        d, e_watch_d = self.client.exists_and_watch("/")
        yield d

        d, g_watch_d = self.client.get_and_watch("/")
        yield d

        d, c_watch_d = self.client.get_children_and_watch("/")
        yield d

        # Connect a client to the same session on a different node.
        self.client2 = ZookeeperClient(self.cluster[1].address)
        yield self.client2.connect(client_id=self.client.client_id)

        # Close the new client and wait for the event propagation
        yield self.client2.close()

        # It can take some time for this to propagate
        yield events_received
        self.assertEqual(len(session_events), 8)

        # The last four (conn + 3 watches) are all expired
        for evt in session_events[4:]:
            self.assertEqual(evt.state_name, "expired")

        # The connection is dead without reconnecting.
        yield self.assertFailure(
            self.client.exists("/"),
            NotConnectedException, ConnectionException)

        self.assertTrue(self.client.unrecoverable)
        yield self.assertFailure(e_watch_d, zookeeper.SessionExpiredException)
        yield self.assertFailure(g_watch_d, zookeeper.SessionExpiredException)
        yield self.assertFailure(c_watch_d, zookeeper.SessionExpiredException)

        # A reconnect attempt made with the dead session id does not
        # yield a usable connection.
        yield self.client.connect(client_id=self.client.client_id)
        yield self.assertFailure(
            self.client.get_children("/"),
            NotConnectedException, ConnectionException)

    test_client_session_expiration_event.timeout = 10

    @inlineCallbacks
    def test_client_reconnect_session_on_different_server(self):
        """On connection failure, An application can choose to use a
        new connection with which to reconnect to a different member
        of the zookeeper cluster, reacquiring the extant session.

        A large obvious caveat to using a new client instance rather
        than reconnecting the existing client, is that even though the
        session has outstanding watches, the watch callbacks/deferreds
        won't be active unless the client instance used to create them
        is connected.
        """
        session_events = []

        def session_event_callback(connection, e):
            session_events.append(e)

        # Connect to a node in the cluster and establish a watch
        self.client = ZookeeperClient(self.cluster[2].address)
        self.client.set_session_callback(session_event_callback)
        yield self.client.connect()

        yield self.client.create("/hello", flags=zookeeper.EPHEMERAL)
        self.assertTrue((yield self.client.exists("/hello")))

        # Shut down the server the client is connected to
        self.cluster[2].stop()
        yield self.sleep(0.1)

        # Verify we got a session event regarding the down server
        self.assertTrue(session_events)

        # Open up a new connection to a different server with same session
        self.client2 = ZookeeperClient(self.cluster[0].address)
        yield self.client2.connect(client_id=self.client.client_id)

        # Close the old disconnected client
        self.client.close()

        # Verify the ephemeral still exists
        exists = yield self.client2.exists("/hello")
        self.assertTrue(exists)

        # Destroy the session and reconnect
        self.client2.close()
        yield self.client.connect(self.cluster[0].address)

        # The ephemeral node is destroyed when the session is closed.
        exists = yield self.client.exists("/hello")
        self.assertFalse(exists)
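
Per the caveat in the last docstring, watch deferreds die with the client
instance that created them, so an application that reconnects through a new
client must re-register its watches. A minimal sketch under that assumption,
using only the client API the tests above exercise; the helper name is
hypothetical:

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def reattach_session_sketch(old_client, fallback_address, watch_paths):
    # Reacquire the old session on another server; deferreds issued by
    # old_client will never fire once it is discarded.
    new_client = ZookeeperClient(fallback_address)
    yield new_client.connect(client_id=old_client.client_id)
    old_client.close()
    # Re-register watches on the new client instance.
    watches = {}
    for path in watch_paths:
        exists_d, watch_d = new_client.exists_and_watch(path)
        yield exists_d
        watches[path] = watch_d
    returnValue((new_client, watches))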