Example #1
    def setUp(self):
        yield super(UnitDeployerTest, self).setUp()
        self.output = self.capture_logging(level=logging.DEBUG)
        yield self.push_default_config()

        # Load the environment with the charm state and charm binary
        environment = self.config.get_default()
        provider = environment.get_machine_provider()
        storage = provider.get_file_storage()
        publisher = CharmPublisher(self.client, storage)
        yield publisher.add_charm(local_charm_id(self.charm), self.charm)
        self.charm_state, = yield publisher.publish()

        # Create a service from the charm, then add a unit and assign
        # it to a machine.
        self.service_state_manager = ServiceStateManager(self.client)
        self.machine_state_manager = MachineStateManager(self.client)
        self.service = yield self.service_state_manager.add_service_state(
            "myblog", self.charm_state, dummy_constraints)
        self.unit_state = yield self.service.add_unit_state()
        self.machine_state = yield self.machine_state_manager.\
            add_machine_state(series_constraints)
        yield self.unit_state.assign_to_machine(self.machine_state)

        # NOTE machine_id must be a str to use with one of the
        # deployment classes
        self.juju_dir = self.makeDir()
        self.unit_manager = UnitDeployer(
            self.client, str(self.machine_state.id), self.juju_dir)
        yield self.unit_manager.start()
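
For context, a minimal sketch (a hypothetical test method, assuming the fixtures built in the setUp above) of driving the deployer afterwards; start_service_unit() and kill_service_unit() are the UnitDeployer methods exercised by the machine agent tests further down this page:

    @inlineCallbacks
    def test_deploy_and_remove_unit(self):
        # hypothetical: deploy the unit assigned in setUp, then tear it down
        yield self.unit_manager.start_service_unit(self.unit_state.unit_name)
        yield self.unit_manager.kill_service_unit(self.unit_state.unit_name)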
Example #2
    def test_add_charm_with_concurrent(self):
        """
        Publishing a charm, that has become published concurrent, after the
        add_charm, works fine. it will write to storage regardless. The use
        of a sha256 as part of the storage key is utilized to help ensure
        uniqueness of bits. The sha256 is also stored with the charm state.

        This relation betewen the charm state and the binary bits, helps
        guarantee the property that any published charm in zookeeper will use
        the binary bits that it was published with.
        """

        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(self.client, self.storage)

        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        yield self.publisher.publish()

        # modify the charm to create a conflict scenario
        self.makeFile("zebra", path=os.path.join(self.sample_dir1, "junk.txt"))

        # assert the charm now has a different sha post modification
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(modified_charm_sha, self.charm.get_sha256())

        # verify publishing raises a StateChanged error
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised state error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # verify the zk state
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get("/charms/%s" % charm_nodes[0])

        # assert the checksum matches the initially published checksum
        self.assertEqual(yaml.load(content)["sha256"], self.charm.get_sha256())

        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # and the modified binary bits were stored
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(self.storage_dir,
                                           modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))
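
The docstring above relies on the sha256 forming part of the provider storage key. A minimal sketch of that key scheme, using under.quote as in these tests (the charm_id and digest variables here are assumed, not taken from the example):

# distinct content hashes produce distinct storage keys, so the concurrent
# upload can never clobber the bits referenced by the published charm state
original_key = under.quote("%s:%s" % (charm_id, original_sha))
modified_key = under.quote("%s:%s" % (charm_id, modified_sha))
assert original_key != modified_key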
Example #3
    def test_add_charm_already_known(self):
        """Adding an existing charm, is an effective noop, as its not added
        to the internal publisher queue.
        """
        # Do an initial publishing of the charm
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)
Example #4
    def test_add_charm_already_known(self):
        """Adding an existing charm, is an effective noop, as its not added
        to the internal publisher queue.
        """
        output = self.capture_logging("juju.charm")
        # Do an initial publishing of the charm
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)
        self.assertIn(
            "Using cached charm version of %s" % self.charm.metadata.name,
            output.getvalue())
Example #5
    def test_add_charm_with_concurrent(self):
        """
        Publishing a charm, that has become published concurrent, after the
        add_charm, works fine. it will write to storage regardless. The use
        of a sha256 as part of the storage key is utilized to help ensure
        uniqueness of bits. The sha256 is also stored with the charm state.

        This relation betewen the charm state and the binary bits, helps
        guarantee the property that any published charm in zookeeper will use
        the binary bits that it was published with.
        """

        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(
            self.client, self.storage)

        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        yield self.publisher.publish()

        # modify the charm to create a conflict scenario
        self.makeFile("zebra",
                      path=os.path.join(self.sample_dir1, "junk.txt"))

        # assert the charm now has a different sha post modification
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(
            modified_charm_sha,
            self.charm.get_sha256())

        # verify publishing raises a StateChanged error
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised state error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # verify the zk state
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get(
            "/charms/%s" % charm_nodes[0])

        # assert the checksum matches the initially published checksum
        self.assertEqual(
            yaml.load(content)["sha256"], self.charm.get_sha256())

        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # and the modified binary bits were stored
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(
            self.storage_dir, modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))
Example #6
class CharmPublisherTest(RepositoryTestBase):

    @inlineCallbacks
    def setUp(self):
        super(CharmPublisherTest, self).setUp()
        zookeeper.set_debug_level(0)

        self.charm = CharmDirectory(self.sample_dir1)
        self.charm_id = local_charm_id(self.charm)
        self.charm_key = under.quote(self.charm_id)
        # provider storage key
        self.charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, self.charm.get_sha256()))

        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.storage_dir = self.makeDir()
        self.storage = FileStorage(self.storage_dir)
        self.publisher = CharmPublisher(self.client, self.storage)

        yield self.client.connect()
        yield self.client.create("/charms")

    def tearDown(self):
        deleteTree("/", self.client.handle)
        self.client.close()
        super(CharmPublisherTest, self).tearDown()

    @inlineCallbacks
    def test_add_charm_and_publish(self):
        open_file_count = _count_open_files()
        yield self.publisher.add_charm(self.charm_id, self.charm)
        result = yield self.publisher.publish()
        self.assertEquals(_count_open_files(), open_file_count)

        children = yield self.client.get_children("/charms")
        self.assertEqual(children, [self.charm_key])
        fh = yield self.storage.get(self.charm_storage_key)
        bundle = CharmBundle(fh)
        self.assertEqual(self.charm.get_sha256(), bundle.get_sha256())

        self.assertEqual(
            result[0].bundle_url, "file://%s/%s" % (
                self.storage_dir, self.charm_storage_key))

    @inlineCallbacks
    def test_published_charm_sans_unicode(self):
        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.publisher.publish()
        data, stat = yield self.client.get("/charms/%s" % self.charm_key)
        self.assertNotIn("unicode", data)

    @inlineCallbacks
    def test_add_charm_with_concurrent(self):
        """
        Publishing a charm, that has become published concurrent, after the
        add_charm, works fine. it will write to storage regardless. The use
        of a sha256 as part of the storage key is utilized to help ensure
        uniqueness of bits. The sha256 is also stored with the charm state.

        This relation betewen the charm state and the binary bits, helps
        guarantee the property that any published charm in zookeeper will use
        the binary bits that it was published with.
        """

        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(
            self.client, self.storage)

        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        yield self.publisher.publish()

        # modify the charm to create a conflict scenario
        self.makeFile("zebra",
                      path=os.path.join(self.sample_dir1, "junk.txt"))

        # assert the charm now has a different sha post modification
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(
            modified_charm_sha,
            self.charm.get_sha256())

        # verify publishing raises a StateChanged error
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised state error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # verify the zk state
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get(
            "/charms/%s" % charm_nodes[0])

        # assert the checksum matches the initially published checksum
        self.assertEqual(
            yaml.load(content)["sha256"], self.charm.get_sha256())

        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # and the modified binary bits were stored
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(
            self.storage_dir, modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))

    @inlineCallbacks
    def test_add_charm_with_concurrent_removal(self):
        """
        If a charm is published, and it detects that the charm exists
        already exists, it will attempt to retrieve the charm state to
        verify there is no checksum mismatch. If concurrently the charm
        is removed, the publisher should fail with a statechange error.
        """
        manager = self.mocker.patch(CharmStateManager)

        manager.get_charm_state(self.charm_id)
        self.mocker.passthrough()

        def match_charm_bundle(bundle):
            return isinstance(bundle, CharmBundle)

        def match_charm_url(url):
            return url.startswith("file://")

        manager.add_charm_state(
            self.charm_id, MATCH(match_charm_bundle), MATCH(match_charm_url))
        self.mocker.result(fail(zookeeper.NodeExistsException()))

        manager.get_charm_state(self.charm_id)
        self.mocker.result(fail(zookeeper.NoNodeException()))
        self.mocker.replay()

        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.failUnlessFailure(self.publisher.publish(), StateChanged)

    @inlineCallbacks
    def test_add_charm_already_known(self):
        """Adding an existing charm, is an effective noop, as its not added
        to the internal publisher queue.
        """
        # Do an initial publishing of the charm
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)
Example #7
def deploy(env_config, environment, repository_path, charm_name,
           service_name, log, config_file=None, num_units=1):
    """Deploy a charm within an environment.

    This will publish the charm to the environment, create
    a service from the charm, and arrange for it to be launched
    on a new machine.
    """
    repo, charm_url = resolve(
        charm_name, repository_path, environment.default_series)

    # Validate config options prior to deployment attempt
    service_options = {}
    service_name = service_name or charm_url.name
    if config_file:
        service_options = parse_config_options(config_file, service_name)

    charm = yield repo.find(charm_url)
    charm_id = str(charm_url.with_revision(charm.get_revision()))

    provider = environment.get_machine_provider()
    placement_policy = provider.get_placement_policy()
    client = yield provider.connect()

    try:
        storage = yield provider.get_file_storage()
        service_manager = ServiceStateManager(client)
        environment_state_manager = EnvironmentStateManager(client)
        yield environment_state_manager.set_config_state(
            env_config, environment.name)

        # Publish the charm to juju
        publisher = CharmPublisher(client, storage)
        yield publisher.add_charm(charm_id, charm)
        result = yield publisher.publish()

        # In future we might have multiple charms be published at
        # the same time.  For now, extract the charm_state from the
        # list.
        charm_state = result[0]

        # Create the service state
        service_state = yield service_manager.add_service_state(
            service_name, charm_state)

        # Use the charm's ConfigOptions instance to validate service
        # options. Invalid options passed will raise an exception
        # and prevent the deploy.
        state = yield service_state.get_config()
        charm_config = yield charm_state.get_config()
        # return the validated options with the defaults included
        service_options = charm_config.validate(service_options)

        state.update(service_options)
        yield state.write()

        # Create desired number of service units
        for i in xrange(num_units):
            unit_state = yield service_state.add_unit_state()
            yield place_unit(client, placement_policy, unit_state)

        # Check if we have any peer relations to establish
        if charm.metadata.peers:
            relation_manager = RelationStateManager(client)
            for peer_name, peer_info in charm.metadata.peers.items():
                yield relation_manager.add_relation_state(
                    RelationEndpoint(service_name,
                                     peer_info["interface"],
                                     peer_name,
                                     "peer"))

        log.info("Charm deployed as service: %r", service_name)
    finally:
        yield client.close()
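
A hedged usage sketch of the deploy() entry point above: env_config and environment come from juju's environments.yaml handling elsewhere, deploy() is assumed to be decorated with @inlineCallbacks in its module (the decorator falls outside the snippet), and the repository path, charm name and unit count below are illustrative assumptions only.

import logging

from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def deploy_blog(env_config, environment):
    log = logging.getLogger("juju.control")  # any logger will do here
    yield deploy(env_config, environment,
                 repository_path="./charms",  # assumed local charm repository
                 charm_name="local:myblog",   # assumed charm name
                 service_name=None,           # default to the charm's own name
                 log=log,
                 num_units=2)                 # start two service units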
Example #8
def deploy(env_config, environment, repository_path, charm_name,
           service_name, log, constraint_strs, config_file=None, upgrade=False,
           num_units=1):
    """Deploy a charm within an environment.

    This will publish the charm to the environment, create
    a service from the charm, and arrange for it to be launched
    on a new machine. If --repository is not specified, it
    will be taken from the environment variable JUJU_REPOSITORY.
    """
    repo, charm_url = resolve(
        charm_name, repository_path, environment.default_series)

    log.info("Searching for charm %s in %s" % (charm_url, repo))
    charm = yield repo.find(charm_url)
    if upgrade:
        if repo.type != "local" or charm.type != "dir":
            raise CharmError(
                charm.path,
                "Only local directory charms can be upgraded on deploy")
        charm.set_revision(charm.get_revision() + 1)

    charm_id = str(charm_url.with_revision(charm.get_revision()))

    # Validate config options prior to deployment attempt
    service_options = {}
    service_name = service_name or charm_url.name
    if config_file:
        service_options = parse_config_options(
            config_file, service_name, charm)

    charm = yield repo.find(charm_url)
    charm_id = str(charm_url.with_revision(charm.get_revision()))

    provider = environment.get_machine_provider()
    placement_policy = provider.get_placement_policy()
    constraint_set = yield provider.get_constraint_set()
    constraints = constraint_set.parse(constraint_strs)
    client = yield provider.connect()

    try:
        yield legacy.check_constraints(client, constraint_strs)
        yield legacy.check_environment(
            client, provider.get_legacy_config_keys())
        yield sync_environment_state(client, env_config, environment.name)

        # Publish the charm to juju
        storage = yield provider.get_file_storage()
        publisher = CharmPublisher(client, storage)
        yield publisher.add_charm(charm_id, charm)
        result = yield publisher.publish()

        # In future we might have multiple charms be published at
        # the same time.  For now, extract the charm_state from the
        # list.
        charm_state = result[0]

        # Create the service state
        service_manager = ServiceStateManager(client)
        service_state = yield service_manager.add_service_state(
            service_name, charm_state, constraints)

        # Use the charm's ConfigOptions instance to validate service
        # options. Invalid options passed will raise an exception
        # and prevent the deploy.
        state = yield service_state.get_config()
        charm_config = yield charm_state.get_config()
        # return the validated options with the defaults included
        service_options = charm_config.validate(service_options)
        state.update(service_options)
        yield state.write()

        # Create desired number of service units
        if (yield service_state.is_subordinate()):
            log.info("Subordinate %r awaiting relationship "
                     "to principal for deployment.", service_name)
        else:
            for i in xrange(num_units):
                unit_state = yield service_state.add_unit_state()
                yield place_unit(client, placement_policy, unit_state)

        # Check if we have any peer relations to establish
        if charm.metadata.peers:
            relation_manager = RelationStateManager(client)
            for peer_name, peer_info in charm.metadata.peers.items():
                yield relation_manager.add_relation_state(
                    RelationEndpoint(service_name,
                                     peer_info["interface"],
                                     peer_name,
                                     "peer"))

        log.info("Charm deployed as service: %r", service_name)
    finally:
        yield client.close()
Example #9
def upgrade_charm(
    config, environment, verbose, log, repository_path, service_name, dry_run):
    """Upgrades a service's charm.

    First determines if an upgrade is available, then updates the
    service charm reference, and marks the units as needing upgrades.
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()

    service_manager = ServiceStateManager(client)
    service_state = yield service_manager.get_service_state(service_name)
    old_charm_id = yield service_state.get_charm_id()

    old_charm_url = CharmURL.parse(old_charm_id)
    old_charm_url.assert_revision()
    repo, charm_url = resolve(
        str(old_charm_url.with_revision(None)),
        repository_path,
        environment.default_series)
    new_charm_url = charm_url.with_revision(
        (yield repo.latest(charm_url)))

    if charm_url.collection.schema == "local":
        if old_charm_url.revision >= new_charm_url.revision:
            new_revision = old_charm_url.revision + 1
            charm = yield repo.find(new_charm_url)
            if isinstance(charm, CharmDirectory):
                if dry_run:
                    log.info("%s would be set to revision %s",
                             charm.path, new_revision)
                else:
                    log.info("Setting %s to revision %s",
                             charm.path, new_revision)
                    charm.set_revision(new_revision)
                new_charm_url.revision = new_revision

    new_charm_id = str(new_charm_url)

    # Verify it's newer than what's deployed
    if not new_charm_url.revision > old_charm_url.revision:
        if dry_run:
            log.info("Service already running latest charm %r", old_charm_id)
        else:
            raise NewerCharmNotFound(old_charm_id)
    elif dry_run:
        log.info("Service would be upgraded from charm %r to %r",
                 old_charm_id, new_charm_id)

    # On dry run, stop before modifying state.
    if not dry_run:
        # Publish the new charm
        storage = provider.get_file_storage()
        publisher = CharmPublisher(client, storage)
        charm = yield repo.find(new_charm_url)
        yield publisher.add_charm(new_charm_id, charm)
        result = yield publisher.publish()
        charm_state = result[0]

        # Update the service charm reference
        yield service_state.set_charm_id(charm_state.id)

    # Mark the units for upgrades
    units = yield service_state.get_all_unit_states()
    for unit in units:
        running, state = yield is_unit_running(client, unit)
        if not running:
            log.info(
                "Unit %r is not in a running state (state: %r), won't upgrade",
                unit.unit_name, state or "uninitialized")
            continue

        if not dry_run:
            yield unit.set_upgrade_flag()
Example #10
    def publish_charm(self, charm_path=sample_directory):
        charm = get_charm_from_path(charm_path)
        publisher = CharmPublisher(self.client, self.storage)
        yield publisher.add_charm(local_charm_id(charm), charm)
        charm_states = yield publisher.publish()
        returnValue((charm, charm_states[0]))
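
A hedged usage sketch of the helper above, assuming it lives on a test case set up like CharmPublisherTest and that the caller is also wrapped in @inlineCallbacks:

    @inlineCallbacks
    def test_publish_helper(self):
        # hypothetical test: the helper returns the charm and its charm state
        charm, charm_state = yield self.publish_charm()
        self.assertEqual(charm_state.name, charm.metadata.name)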
Example #11
class MachineAgentTest(AgentTestBase, RepositoryTestBase):

    agent_class = MachineAgent

    @inlineCallbacks
    def setUp(self):
        yield super(MachineAgentTest, self).setUp()

        self.output = self.capture_logging(level=logging.DEBUG)
        environment = self.config.get_default()

        # Load the environment with the charm state and charm binary
        self.provider = environment.get_machine_provider()
        self.storage = self.provider.get_file_storage()
        self.charm = CharmDirectory(self.sample_dir1)
        self.publisher = CharmPublisher(self.client, self.storage)
        yield self.publisher.add_charm(local_charm_id(self.charm), self.charm)

        charm_states = yield self.publisher.publish()
        self.charm_state = charm_states[0]

        # Create a service from the charm from which we can create units for
        # the machine.
        self.service_state_manager = ServiceStateManager(self.client)
        self.service = yield self.service_state_manager.add_service_state(
            "fatality-blog", self.charm_state, dummy_constraints)

    @inlineCallbacks
    def get_agent_config(self):
        # gets invoked by AgentTestBase.setUp
        options = yield super(MachineAgentTest, self).get_agent_config()
        machine_state_manager = MachineStateManager(self.client)

        self.machine_state = yield machine_state_manager.add_machine_state(
            series_constraints)

        self.change_environment(
            JUJU_MACHINE_ID="0",
            JUJU_HOME=self.juju_directory)
        options["machine_id"] = str(self.machine_state.id)

        # Start the agent with watching enabled
        returnValue(options)

    @inlineCallbacks
    def test_start_begins_watch_and_initializes_directories(self):
        self.agent.set_watch_enabled(True)
        mock_machine_state = self.mocker.patch(MachineState)
        mock_machine_state.watch_assigned_units(
            self.agent.watch_service_units)
        self.mocker.replay()
        yield self.agent.startService()

        self.assertTrue(os.path.isdir(self.agent.units_directory))
        self.assertTrue(os.path.isdir(self.agent.unit_state_directory))
        self.assertIn(
            "Machine agent started id:%s" % self.agent.get_machine_id(),
            self.output.getvalue())
        yield self.agent.stopService()

    def test_agent_machine_id_environment_extraction(self):
        self.change_args("es-agent")
        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)

        config = parser.parse_args(namespace=TwistedOptionNamespace())
        self.assertEqual(
            config["machine_id"], "0")

    def test_get_agent_name(self):
        self.assertEqual(self.agent.get_agent_name(), "Machine:0")

    def test_agent_machine_id_cli_error(self):
        """
        If the machine id can't be found, a detailed error message
        is given.
        """
        # initially set up by get_agent_config in setUp
        self.change_environment(JUJU_MACHINE_ID="")
        self.change_args("es-agent",
                         "--zookeeper-servers", get_test_zookeeper_address(),
                         "--juju-directory", self.makeDir(),
                         "--session-file", self.makeFile())
        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)
        options = parser.parse_args(namespace=TwistedOptionNamespace())

        e = self.assertRaises(
            JujuError,
            self.agent.configure,
            options)

        self.assertIn(
            ("--machine-id must be provided in the command line,"
            " or $JUJU_MACHINE_ID in the environment"),
            str(e))

    def test_agent_machine_id_cli_extraction(self):
        """Command line passing of machine id works and has precedence
        over environment arg passing."""
        self.change_environment(JUJU_MACHINE_ID=str(21))
        self.change_args("es-agent", "--machine-id", "0")

        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)

        config = parser.parse_args(namespace=TwistedOptionNamespace())
        self.assertEqual(
            config["machine_id"], "0")

    def test_machine_agent_knows_its_machine_id(self):
        self.assertEqual(self.agent.get_machine_id(), "0")

    @inlineCallbacks
    def test_watch_new_service_unit(self):
        """
        Adding a new service unit is detected by the watch.
        """
        from juju.unit.deploy import UnitDeployer
        mock_deployer = self.mocker.patch(UnitDeployer)
        mock_deployer.start_service_unit("fatality-blog/0")
        test_deferred = Deferred()

        def test_complete(service_name):
            test_deferred.callback(True)

        self.mocker.call(test_complete)
        self.mocker.replay()

        self.agent.set_watch_enabled(True)
        yield self.agent.startService()

        # Create a new service unit
        self.service_unit = yield self.service.add_unit_state()
        yield self.service_unit.assign_to_machine(self.machine_state)

        yield test_deferred
        self.assertIn(
            "Units changed old:set([]) new:set(['fatality-blog/0'])",
            self.output.getvalue())

    @inlineCallbacks
    def test_watch_new_service_unit_error(self):
        """
        An error while starting a new service is logged
        """
        # Inject an error into the service deployment
        from juju.unit.deploy import UnitDeployer
        mock_deployer = self.mocker.patch(UnitDeployer)
        mock_deployer.start_service_unit("fatality-blog/0")
        self.mocker.result(fail(SyntaxError("Bad")))
        self.mocker.replay()

        yield self.agent.startService()
        yield self.agent.watch_service_units(None, set(["fatality-blog/0"]))
        self.assertIn("Starting service unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("Error starting unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("SyntaxError: Bad", self.output.getvalue())

    @inlineCallbacks
    def test_service_unit_removed(self):
        """
        Service unit removed with manual invocation of watch_service_units.
        """
        from juju.unit.deploy import UnitDeployer
        mock_deployer = self.mocker.patch(UnitDeployer)
        started = Deferred()
        mock_deployer.start_service_unit("fatality-blog/0")
        self.mocker.call(started.callback)
        stopped = Deferred()
        mock_deployer.kill_service_unit("fatality-blog/0")
        self.mocker.call(stopped.callback)
        self.mocker.replay()

        # Start the agent with watching enabled
        self.agent.set_watch_enabled(True)
        yield self.agent.startService()

        # Create a new service unit
        self.service_unit = yield self.service.add_unit_state()
        yield self.service_unit.assign_to_machine(self.machine_state)

        # Need to ensure there's no concurrency creating an overlap
        # between assigning and unassigning to the machine, since it is
        # then possible for the watch in the machine agent to not
        # observe *any* change in this case ("you cannot reliably see
        # every change that happens to a node in ZooKeeper")
        yield started

        # And now remove it
        yield self.service_unit.unassign_from_machine()
        yield stopped

    @inlineCallbacks
    def test_watch_removed_service_unit_error(self):
        """
        An error while removing a service unit is logged
        """
        from juju.unit.deploy import UnitDeployer
        mock_deployer = self.mocker.patch(UnitDeployer)
        mock_deployer.kill_service_unit("fatality-blog/0")
        self.mocker.result(fail(OSError("Bad")))
        self.mocker.replay()

        yield self.agent.startService()
        yield self.agent.watch_service_units(set(["fatality-blog/0"]), set())
        self.assertIn("Stopping service unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("Error stopping unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("OSError: Bad", self.output.getvalue())
Example #12
class MachineAgentTest(AgentTestBase, RepositoryTestBase):

    agent_class = MachineAgent

    @inlineCallbacks
    def setUp(self):
        yield super(MachineAgentTest, self).setUp()

        self.output = self.capture_logging(
            "juju.agents.machine", level=logging.DEBUG)

        config = self.get_test_environment_config()
        environment = config.get_default()

        # Store the environment to zookeeper
        environment_state_manager = EnvironmentStateManager(self.client)
        yield environment_state_manager.set_config_state(config, "myfirstenv")

        # Load the environment with the charm state and charm binary
        self.provider = environment.get_machine_provider()
        self.storage = self.provider.get_file_storage()
        self.charm = CharmDirectory(self.sample_dir1)
        self.publisher = CharmPublisher(self.client, self.storage)
        yield self.publisher.add_charm(local_charm_id(self.charm), self.charm)

        charm_states = yield self.publisher.publish()
        self.charm_state = charm_states[0]

        # Create a service from the charm from which we can create units for
        # the machine.
        self.service_state_manager = ServiceStateManager(self.client)
        self.service = yield self.service_state_manager.add_service_state(
            "fatality-blog", self.charm_state)

    def process_kill(self, pid):
        try:
            os.kill(pid, 9)
        except OSError:
            pass

    @inlineCallbacks
    def get_agent_config(self):
        # gets invoked by AgentTestBase.setUp
        options = yield super(MachineAgentTest, self).get_agent_config()
        machine_state_manager = MachineStateManager(self.client)

        self.machine_state = yield machine_state_manager.add_machine_state()

        self.change_environment(
            JUJU_MACHINE_ID="0",
            JUJU_HOME=self.juju_directory)
        options["machine_id"] = str(self.machine_state.id)

        # Start the agent with watching enabled
        returnValue(options)

    @inlineCallbacks
    def test_start_begins_watch_and_initializes_directories(self):
        self.agent.set_watch_enabled(True)
        mock_machine_state = self.mocker.patch(MachineState)
        mock_machine_state.watch_assigned_units(
            self.agent.watch_service_units)
        self.mocker.replay()
        yield self.agent.startService()

        self.assertTrue(os.path.isdir(self.agent.charms_directory))
        self.assertTrue(os.path.isdir(self.agent.units_directory))
        self.assertTrue(os.path.isdir(self.agent.unit_state_directory))
        self.assertIn(
            "Machine agent started id:%s" % self.agent.get_machine_id(),
            self.output.getvalue())
        yield self.agent.stopService()

    def test_agent_machine_id_environment_extraction(self):
        self.change_args("es-agent")
        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)

        config = parser.parse_args(namespace=TwistedOptionNamespace())
        self.assertEqual(
            config["machine_id"], "0")

    def test_get_agent_name(self):
        self.assertEqual(self.agent.get_agent_name(), "Machine:0")

    def test_agent_machine_id_cli_error(self):
        """
        If the machine id can't be found, a detailed error message
        is given.
        """
        # initially set up by get_agent_config in setUp
        self.change_environment(JUJU_MACHINE_ID="")
        self.change_args("es-agent",
                         "--zookeeper-servers",
                         get_test_zookeeper_address(),
                         "--juju-directory",
                         self.makeDir())
        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)
        options = parser.parse_args(namespace=TwistedOptionNamespace())

        e = self.assertRaises(
            JujuError,
            self.agent.configure,
            options)

        self.assertIn(
            ("--machine-id must be provided in the command line,"
            " or $JUJU_MACHINE_ID in the environment"),
            str(e))

    def test_agent_machine_id_cli_extraction(self):
        """Command line passing of machine id works and has precedence
        over environment arg passing."""
        self.change_environment(JUJU_MACHINE_ID=str(21))
        self.change_args("es-agent", "--machine-id", "0")

        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)

        config = parser.parse_args(namespace=TwistedOptionNamespace())
        self.assertEqual(
            config["machine_id"], "0")

    def test_machine_agent_knows_its_machine_id(self):
        self.assertEqual(self.agent.get_machine_id(), "0")

    @inlineCallbacks
    def test_charm_download(self):
        """
        Downloading a charm should store the charm locally.
        """
        yield self.agent.startService()
        yield self.agent.download_charm(self.charm_state)

        checksum = self.charm.get_sha256()
        charm_id = local_charm_id(self.charm)
        charm_key = under.quote("%s:%s" % (charm_id, checksum))
        charm_path = os.path.join(self.agent.charms_directory, charm_key)

        self.assertTrue(os.path.exists(charm_path))
        bundle = CharmBundle(charm_path)
        self.assertEquals(
            bundle.get_revision(), self.charm.get_revision())
        self.assertEquals(bundle.get_sha256(), checksum)
        self.assertIn(
            "Downloading charm %s" % charm_id, self.output.getvalue())

    @inlineCallbacks
    def test_watch_new_service_unit(self):
        """
        Adding a new service unit is detected by the watch.
        """
        self.agent.set_watch_enabled(True)
        yield self.agent.startService()

        mock_deployment = self.mocker.patch(self.agent.deploy_factory)
        mock_deployment.start("0", get_test_zookeeper_address(), MATCH_BUNDLE)

        test_deferred = Deferred()

        def test_complete(machine_id, servers, bundle):
            test_deferred.callback(True)

        self.mocker.call(test_complete)
        self.mocker.replay()

        # Create a new service unit
        self.service_unit = yield self.service.add_unit_state()
        yield self.service_unit.assign_to_machine(self.machine_state)

        yield test_deferred
        self.assertIn(
            "Units changed old:set([]) new:set(['fatality-blog/0'])",
            self.output.getvalue())

    @inlineCallbacks
    def test_watch_new_service_unit_error(self):
        """
        An error while starting a new service is logged
        """
        # Inject an error into the service deployment
        mock_agent = self.mocker.patch(self.agent)
        mock_agent.start_service_unit("fatality-blog/0")
        self.mocker.result(fail(SyntaxError("Bad")))
        self.mocker.replay()

        yield self.agent.watch_service_units(None, set(["fatality-blog/0"]))
        self.assertIn("Starting service unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("Error starting unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("SyntaxError: Bad", self.output.getvalue())

    @inlineCallbacks
    def test_service_unit_removed(self):
        """
        Service unit removed with manual invocation of watch_service_units.
        """
        # Start the agent with watching enabled
        self.agent.set_watch_enabled(True)
        yield self.agent.startService()

        test_deferred = Deferred()

        mock_deployment = self.mocker.patch(self.agent.deploy_factory)
        mock_deployment.start("0", get_test_zookeeper_address(), MATCH_BUNDLE)
        self.mocker.result(succeed(True))
        mock_deployment.destroy()
        self.mocker.result(succeed(True))

        def test_complete():
            test_deferred.callback(True)

        self.mocker.call(test_complete)
        self.mocker.replay()

        # Create a new service unit
        self.service_unit = yield self.service.add_unit_state()
        yield self.service_unit.assign_to_machine(self.machine_state)

        # And now remove it
        yield self.service_unit.unassign_from_machine()
        yield test_deferred

    @inlineCallbacks
    def test_watch_removed_service_unit_error(self):
        """
        An error while removing a service unit is logged
        """
        mock_agent = self.mocker.patch(self.agent)
        mock_agent.kill_service_unit("fatality-blog/0")
        self.mocker.result(fail(OSError("Bad")))
        self.mocker.replay()

        yield self.agent.watch_service_units(set(["fatality-blog/0"]), set())
        self.assertIn("Stopping service unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("Error stopping unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("OSError: Bad", self.output.getvalue())
Example #13
class MachineAgentTest(AgentTestBase, RepositoryTestBase):

    agent_class = MachineAgent

    @inlineCallbacks
    def setUp(self):
        yield super(MachineAgentTest, self).setUp()

        self.output = self.capture_logging("juju.agents.machine",
                                           level=logging.DEBUG)

        config = self.get_test_environment_config()
        environment = config.get_default()

        # Store the environment to zookeeper
        environment_state_manager = EnvironmentStateManager(self.client)
        yield environment_state_manager.set_config_state(config, "myfirstenv")

        # Load the environment with the charm state and charm binary
        self.provider = environment.get_machine_provider()
        self.storage = self.provider.get_file_storage()
        self.charm = CharmDirectory(self.sample_dir1)
        self.publisher = CharmPublisher(self.client, self.storage)
        yield self.publisher.add_charm(local_charm_id(self.charm), self.charm)

        charm_states = yield self.publisher.publish()
        self.charm_state = charm_states[0]

        # Create a service from the charm from which we can create units for
        # the machine.
        self.service_state_manager = ServiceStateManager(self.client)
        self.service = yield self.service_state_manager.add_service_state(
            "fatality-blog", self.charm_state)

    def process_kill(self, pid):
        try:
            os.kill(pid, 9)
        except OSError:
            pass

    @inlineCallbacks
    def get_agent_config(self):
        # gets invoked by AgentTestBase.setUp
        options = yield super(MachineAgentTest, self).get_agent_config()
        machine_state_manager = MachineStateManager(self.client)

        self.machine_state = yield machine_state_manager.add_machine_state()

        self.change_environment(JUJU_MACHINE_ID="0",
                                JUJU_HOME=self.juju_directory)
        options["machine_id"] = str(self.machine_state.id)

        # Start the agent with watching enabled
        returnValue(options)

    @inlineCallbacks
    def test_start_begins_watch_and_initializes_directories(self):
        self.agent.set_watch_enabled(True)
        mock_machine_state = self.mocker.patch(MachineState)
        mock_machine_state.watch_assigned_units(self.agent.watch_service_units)
        self.mocker.replay()
        yield self.agent.startService()

        self.assertTrue(os.path.isdir(self.agent.charms_directory))
        self.assertTrue(os.path.isdir(self.agent.units_directory))
        self.assertTrue(os.path.isdir(self.agent.unit_state_directory))
        self.assertIn(
            "Machine agent started id:%s" % self.agent.get_machine_id(),
            self.output.getvalue())
        yield self.agent.stopService()

    def test_agent_machine_id_environment_extraction(self):
        self.change_args("es-agent")
        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)

        config = parser.parse_args(namespace=TwistedOptionNamespace())
        self.assertEqual(config["machine_id"], "0")

    def test_get_agent_name(self):
        self.assertEqual(self.agent.get_agent_name(), "Machine:0")

    def test_agent_machine_id_cli_error(self):
        """
        If the machine id can't be found, a detailed error message
        is given.
        """
        # initially set up by get_agent_config in setUp
        self.change_environment(JUJU_MACHINE_ID="")
        self.change_args("es-agent", "--zookeeper-servers",
                         get_test_zookeeper_address(), "--juju-directory",
                         self.makeDir())
        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)
        options = parser.parse_args(namespace=TwistedOptionNamespace())

        e = self.assertRaises(JujuError, self.agent.configure, options)

        self.assertIn(("--machine-id must be provided in the command line,"
                       " or $JUJU_MACHINE_ID in the environment"), str(e))

    def test_agent_machine_id_cli_extraction(self):
        """Command line passing of machine id works and has precedence
        over environment arg passing."""
        self.change_environment(JUJU_MACHINE_ID=str(21))
        self.change_args("es-agent", "--machine-id", "0")

        parser = argparse.ArgumentParser()
        self.agent.setup_options(parser)

        config = parser.parse_args(namespace=TwistedOptionNamespace())
        self.assertEqual(config["machine_id"], "0")

    def test_machine_agent_knows_its_machine_id(self):
        self.assertEqual(self.agent.get_machine_id(), "0")

    @inlineCallbacks
    def test_charm_download(self):
        """
        Downloading a charm should store the charm locally.
        """
        yield self.agent.startService()
        yield self.agent.download_charm(self.charm_state)

        checksum = self.charm.get_sha256()
        charm_id = local_charm_id(self.charm)
        charm_key = under.quote("%s:%s" % (charm_id, checksum))
        charm_path = os.path.join(self.agent.charms_directory, charm_key)

        self.assertTrue(os.path.exists(charm_path))
        bundle = CharmBundle(charm_path)
        self.assertEquals(bundle.get_revision(), self.charm.get_revision())
        self.assertEquals(bundle.get_sha256(), checksum)
        self.assertIn("Downloading charm %s" % charm_id,
                      self.output.getvalue())

    @inlineCallbacks
    def test_watch_new_service_unit(self):
        """
        Adding a new service unit is detected by the watch.
        """
        self.agent.set_watch_enabled(True)
        yield self.agent.startService()

        mock_deployment = self.mocker.patch(self.agent.deploy_factory)
        mock_deployment.start("0", get_test_zookeeper_address(), MATCH_BUNDLE)

        test_deferred = Deferred()

        def test_complete(machine_id, servers, bundle):
            test_deferred.callback(True)

        self.mocker.call(test_complete)
        self.mocker.replay()

        # Create a new service unit
        self.service_unit = yield self.service.add_unit_state()
        yield self.service_unit.assign_to_machine(self.machine_state)

        yield test_deferred
        self.assertIn("Units changed old:set([]) new:set(['fatality-blog/0'])",
                      self.output.getvalue())

    @inlineCallbacks
    def test_watch_new_service_unit_error(self):
        """
        An error while starting a new service is logged
        """
        # Inject an error into the service deployment
        mock_agent = self.mocker.patch(self.agent)
        mock_agent.start_service_unit("fatality-blog/0")
        self.mocker.result(fail(SyntaxError("Bad")))
        self.mocker.replay()

        yield self.agent.watch_service_units(None, set(["fatality-blog/0"]))
        self.assertIn("Starting service unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("Error starting unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("SyntaxError: Bad", self.output.getvalue())

    @inlineCallbacks
    def test_service_unit_removed(self):
        """
        Service unit removed with manual invocation of watch_service_units.
        """
        # Start the agent with watching enabled
        self.agent.set_watch_enabled(True)
        yield self.agent.startService()

        test_deferred = Deferred()

        mock_deployment = self.mocker.patch(self.agent.deploy_factory)
        mock_deployment.start("0", get_test_zookeeper_address(), MATCH_BUNDLE)
        self.mocker.result(succeed(True))
        mock_deployment.destroy()
        self.mocker.result(succeed(True))

        def test_complete():
            test_deferred.callback(True)

        self.mocker.call(test_complete)
        self.mocker.replay()

        # Create a new service unit
        self.service_unit = yield self.service.add_unit_state()
        yield self.service_unit.assign_to_machine(self.machine_state)

        # And now remove it
        yield self.service_unit.unassign_from_machine()
        yield test_deferred

    @inlineCallbacks
    def test_watch_removed_service_unit_error(self):
        """
        An error while removing a service unit is logged
        """
        mock_agent = self.mocker.patch(self.agent)
        mock_agent.kill_service_unit("fatality-blog/0")
        self.mocker.result(fail(OSError("Bad")))
        self.mocker.replay()

        yield self.agent.watch_service_units(set(["fatality-blog/0"]), set())
        self.assertIn("Stopping service unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("Error stopping unit: %s" % "fatality-blog/0",
                      self.output.getvalue())
        self.assertIn("OSError: Bad", self.output.getvalue())
Example #14
class CharmPublisherTest(RepositoryTestBase):
    @inlineCallbacks
    def setUp(self):
        super(CharmPublisherTest, self).setUp()
        zookeeper.set_debug_level(0)

        self.charm = CharmDirectory(self.sample_dir1)
        self.charm_id = local_charm_id(self.charm)
        self.charm_key = under.quote(self.charm_id)
        # provider storage key
        self.charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, self.charm.get_sha256()))

        self.client = ZookeeperClient(get_test_zookeeper_address())
        self.storage_dir = self.makeDir()
        self.storage = FileStorage(self.storage_dir)
        self.publisher = CharmPublisher(self.client, self.storage)

        yield self.client.connect()
        yield self.client.create("/charms")

    def tearDown(self):
        deleteTree("/", self.client.handle)
        self.client.close()
        super(CharmPublisherTest, self).tearDown()

    @inlineCallbacks
    def test_add_charm_and_publish(self):
        open_file_count = _count_open_files()
        yield self.publisher.add_charm(self.charm_id, self.charm)
        result = yield self.publisher.publish()
        self.assertEquals(_count_open_files(), open_file_count)

        children = yield self.client.get_children("/charms")
        self.assertEqual(children, [self.charm_key])
        fh = yield self.storage.get(self.charm_storage_key)
        bundle = CharmBundle(fh)
        self.assertEqual(self.charm.get_sha256(), bundle.get_sha256())

        self.assertEqual(
            result[0].bundle_url,
            "file://%s/%s" % (self.storage_dir, self.charm_storage_key))

    @inlineCallbacks
    def test_published_charm_sans_unicode(self):
        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.publisher.publish()
        data, stat = yield self.client.get("/charms/%s" % self.charm_key)
        self.assertNotIn("unicode", data)

    @inlineCallbacks
    def test_add_charm_with_concurrent(self):
        """
        Publishing a charm, that has become published concurrent, after the
        add_charm, works fine. it will write to storage regardless. The use
        of a sha256 as part of the storage key is utilized to help ensure
        uniqueness of bits. The sha256 is also stored with the charm state.

        This relation betewen the charm state and the binary bits, helps
        guarantee the property that any published charm in zookeeper will use
        the binary bits that it was published with.
        """

        yield self.publisher.add_charm(self.charm_id, self.charm)

        concurrent_publisher = CharmPublisher(self.client, self.storage)

        charm = CharmDirectory(self.sample_dir1)
        yield concurrent_publisher.add_charm(self.charm_id, charm)

        yield self.publisher.publish()

        # modify the charm to create a conflict scenario
        self.makeFile("zebra", path=os.path.join(self.sample_dir1, "junk.txt"))

        # assert the charm now has a different sha post modification
        modified_charm_sha = charm.get_sha256()
        self.assertNotEqual(modified_charm_sha, self.charm.get_sha256())

        # verify publishing raises a StateChanged error
        def verify_failure(result):
            if not isinstance(result, Failure):
                self.fail("Should have raised state error")
            result.trap(StateChanged)
            return True

        yield concurrent_publisher.publish().addBoth(verify_failure)

        # verify the zk state
        charm_nodes = yield self.client.get_children("/charms")
        self.assertEqual(charm_nodes, [self.charm_key])

        content, stat = yield self.client.get("/charms/%s" % charm_nodes[0])

        # assert the checksum matches the initially published checksum
        self.assertEqual(yaml.load(content)["sha256"], self.charm.get_sha256())

        store_path = os.path.join(self.storage_dir, self.charm_storage_key)
        self.assertTrue(os.path.exists(store_path))

        # and the modified binary bits were stored
        modified_charm_storage_key = under.quote(
            "%s:%s" % (self.charm_id, modified_charm_sha))
        modified_store_path = os.path.join(self.storage_dir,
                                           modified_charm_storage_key)
        self.assertTrue(os.path.exists(modified_store_path))

    @inlineCallbacks
    def test_add_charm_with_concurrent_removal(self):
        """
        If a charm is published, and it detects that the charm exists
        already exists, it will attempt to retrieve the charm state to
        verify there is no checksum mismatch. If concurrently the charm
        is removed, the publisher should fail with a statechange error.
        """
        manager = self.mocker.patch(CharmStateManager)

        manager.get_charm_state(self.charm_id)
        self.mocker.passthrough()

        def match_charm_bundle(bundle):
            return isinstance(bundle, CharmBundle)

        def match_charm_url(url):
            return url.startswith("file://")

        manager.add_charm_state(self.charm_id, MATCH(match_charm_bundle),
                                MATCH(match_charm_url))
        self.mocker.result(fail(zookeeper.NodeExistsException()))

        manager.get_charm_state(self.charm_id)
        self.mocker.result(fail(zookeeper.NoNodeException()))
        self.mocker.replay()

        yield self.publisher.add_charm(self.charm_id, self.charm)
        yield self.failUnlessFailure(self.publisher.publish(), StateChanged)

    @inlineCallbacks
    def test_add_charm_already_known(self):
        """Adding an existing charm, is an effective noop, as its not added
        to the internal publisher queue.
        """
        # Do an initial publishing of the charm
        scheduled = yield self.publisher.add_charm(self.charm_id, self.charm)
        self.assertTrue(scheduled)
        result = yield self.publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)

        publisher = CharmPublisher(self.client, self.storage)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)
        scheduled = yield publisher.add_charm(self.charm_id, self.charm)
        self.assertFalse(scheduled)

        result = yield publisher.publish()
        self.assertEqual(result[0].name, self.charm.metadata.name)
        self.assertEqual(result[1].name, self.charm.metadata.name)