def test_add_unit(self):
    """
    'juju add-unit <service_name>' will add a new service unit of the
    given service.
    """
    unit_names = yield self.service_state1.get_unit_names()
    self.assertEqual(len(unit_names), 1)
    finished = self.setup_cli_reactor()
    self.setup_exit(0)
    self.mocker.replay()

    # trash environment to check syncing
    yield self.client.delete("/environment")
    main(["add-unit", "mysql"])
    yield finished

    # verify the env state was synced
    esm = EnvironmentStateManager(self.client)
    yield esm.get_config()

    # verify the unit and its machine assignment.
    unit_names = yield self.service_state1.get_unit_names()
    self.assertEqual(len(unit_names), 2)

    topology = yield self.get_topology()
    unit = yield self.service_state1.get_unit_state("mysql/1")
    machine_id = topology.get_service_unit_machine(
        self.service_state1.internal_id, unit.internal_id)
    self.assertNotEqual(machine_id, None)
    self.assertIn(
        "Unit 'mysql/1' added to service 'mysql'",
        self.output.getvalue())
    yield self.assert_machine_assignments("mysql", [1, 2])
def setUp(self):
    yield super(MachineAgentTest, self).setUp()
    self.output = self.capture_logging(
        "juju.agents.machine", level=logging.DEBUG)

    config = self.get_test_environment_config()
    environment = config.get_default()

    # Store the environment to zookeeper
    environment_state_manager = EnvironmentStateManager(self.client)
    yield environment_state_manager.set_config_state(config, "myfirstenv")

    # Load the environment with the charm state and charm binary
    self.provider = environment.get_machine_provider()
    self.storage = self.provider.get_file_storage()
    self.charm = CharmDirectory(self.sample_dir1)
    self.publisher = CharmPublisher(self.client, self.storage)
    yield self.publisher.add_charm(local_charm_id(self.charm), self.charm)
    charm_states = yield self.publisher.publish()
    self.charm_state = charm_states[0]

    # Create a service from the charm from which we can create units for
    # the machine.
    self.service_state_manager = ServiceStateManager(self.client)
    self.service = yield self.service_state_manager.add_service_state(
        "fatality-blog", self.charm_state)
def test_terminate_unused_machine(self):
    """Verify a typical allocation, unassignment, and then termination."""
    wait_on_reactor_stopped = self.setup_cli_reactor()
    self.setup_exit(0)
    self.mocker.replay()

    wordpress_service_state = \
        yield self.add_service_from_charm("wordpress")
    wordpress_unit_state = yield wordpress_service_state.add_unit_state()
    wordpress_machine_state = yield self.add_machine_state()
    yield wordpress_unit_state.assign_to_machine(wordpress_machine_state)

    riak_service_state = yield self.add_service_from_charm("riak")
    riak_unit_state = yield riak_service_state.add_unit_state()
    riak_machine_state = yield self.add_machine_state()
    yield riak_unit_state.assign_to_machine(riak_machine_state)

    mysql_service_state = yield self.add_service_from_charm("mysql")
    mysql_unit_state = yield mysql_service_state.add_unit_state()
    mysql_machine_state = yield self.add_machine_state()
    yield mysql_unit_state.assign_to_machine(mysql_machine_state)

    yield wordpress_unit_state.unassign_from_machine()
    yield mysql_unit_state.unassign_from_machine()
    yield self.assert_machine_states([0, 1, 2, 3], [])

    # trash environment to check syncing
    yield self.client.delete("/environment")
    main(["terminate-machine", "1", "3"])
    yield wait_on_reactor_stopped

    # check environment synced
    esm = EnvironmentStateManager(self.client)
    yield esm.get_config()

    self.assertIn(
        "Machines terminated: 1, 3", self.output.getvalue())
    yield self.assert_machine_states([0, 2], [1, 3])
def constraints_set(env_config, environment, service_name, constraint_strs):
    """
    Machine constraints allow you to pick the hardware to which your
    services will be deployed. Examples:

    $ juju set-constraints --service-name mysql mem=8G cpu=4

    $ juju set-constraints instance-type=t1.micro

    Available constraints vary by provider type, and will be ignored if not
    understood by the current environment's provider. The current set of
    available constraints across all providers is:

    On Amazon EC2:

    * arch (CPU architecture: i386/amd64/arm; amd64 by default)
    * cpu (processing power in Amazon ECU; 1 by default)
    * mem (memory in [MGT]iB; 512M by default)
    * instance-type (unset by default)
    * ec2-zone (unset by default)

    On Orchestra:

    * orchestra-classes (unset by default)

    On MAAS:

    * maas-name (unset by default)

    Service constraints, if specified, will override environment
    constraints, which will in turn override the juju defaults of mem=512M,
    cpu=1, arch=amd64.

    New constraints set on an entity will completely replace that entity's
    pre-existing constraints.

    To override an environment constraint with the juju default when setting
    service constraints, just specify "name=" (rather than just not
    specifying the constraint at all, which will cause it to inherit the
    environment's value). To entirely unset a constraint, specify
    "name=any".
    """
    provider = environment.get_machine_provider()
    constraint_set = yield provider.get_constraint_set()
    constraints = constraint_set.parse(constraint_strs)
    client = yield provider.connect()
    try:
        yield legacy.check_constraints(client, constraint_strs)
        yield sync_environment_state(client, env_config, environment.name)
        if service_name is None:
            esm = EnvironmentStateManager(client)
            yield esm.set_constraints(constraints)
        else:
            ssm = ServiceStateManager(client)
            service = yield ssm.get_service_state(service_name)
            yield service.set_constraints(constraints)
    finally:
        yield client.close()
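For a rough sense of what parse() produces here, a minimal sketch assuming a ConstraintSet obtained from the dummy provider used by the tests later in this section (the exact defaults are provider-dependent):

# Sketch only: `constraint_set` is assumed to come from
# provider.get_constraint_set(), as in constraints_set() above.
constraints = constraint_set.parse(["arch=any", "cpu=10"])
# With the dummy provider this compares equal to:
# {"ubuntu-series": None, "provider-type": "dummy",
#  "arch": None, "cpu": 10.0, "mem": 512.0}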
def get_constraints(self):
    """Get the machine's hardware constraints"""
    # Note: machine constraints should not be settable; they're a snapshot
    # of the constraints of the unit state for which they were created. (It
    # makes no sense to arbitrarily declare that an m1.small is now a
    # cc2.8xlarge, anyway.)
    esm = EnvironmentStateManager(self._client)
    constraint_set = yield esm.get_constraint_set()
    data = yield self._get_node_value("constraints", {})
    returnValue(constraint_set.load(data))
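A minimal read-side sketch (a hypothetical helper, following the pattern used in test_initialize later in this section):

@inlineCallbacks
def show_machine_constraints(client):
    # Hypothetical helper: read back the constraints snapshot stored for
    # machine 0 and return it as a plain dict.
    machine_state_manager = MachineStateManager(client)
    machine_state = yield machine_state_manager.get_machine_state(0)
    constraints = yield machine_state.get_constraints()
    returnValue(dict(constraints))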
def test_deploy_sends_environment(self):
    """Deploy pushes the local environment config to zookeeper."""
    environment = self.config.get("firstenv")
    yield deploy.deploy(
        self.config, environment, self.unbundled_repo_path, "local:sample",
        None, logging.getLogger("deploy"), [])

    env_state_manager = EnvironmentStateManager(self.client)
    env_config = yield env_state_manager.get_config()

    self.assertEquals(yaml.load(env_config.serialize("firstenv")),
                      yaml.load(self.config.serialize("firstenv")))
def test_deploy_sends_environment(self):
    """Deploy pushes the local environment config to zookeeper."""
    environment = self.config.get("firstenv")
    yield deploy.deploy(
        self.config, environment, self.unbundled_repo_path, "local:sample",
        None, logging.getLogger("deploy"))

    env_state_manager = EnvironmentStateManager(self.client)
    env_config = yield env_state_manager.get_config()

    self.assertEquals(yaml.load(env_config.serialize("firstenv")),
                      yaml.load(self.config.serialize("firstenv")))
def setUp(self):
    yield super(CharmPublisherTestBase, self).setUp()
    config = self.get_test_environment_config()
    environment = config.get_default()

    # Store the environment to zookeeper
    environment_state_manager = EnvironmentStateManager(self.client)
    yield environment_state_manager.set_config_state(config, "myfirstenv")

    # Load the environment
    self.provider = environment.get_machine_provider()
    self.storage = self.provider.get_file_storage()
def test_environment_constraint(self):
    yield self.client.delete("/environment")
    finished = self.setup_cli_reactor()
    self.setup_exit(0)
    self.mocker.replay()
    main(["set-constraints", "arch=arm", "cpu=any"])
    yield finished

    esm = EnvironmentStateManager(self.client)
    yield esm.get_config()
    constraints = yield esm.get_constraints()
    self.assertEquals(constraints, {
        "ubuntu-series": None,
        "provider-type": "dummy",
        "arch": "arm",
        "cpu": None,
        "mem": 512.0})
def sync_environment_state(client, config, name):
    """Push the local environment config to zookeeper.

    This needs to be done:

    * On any command which can cause the provisioning agent to take action
      against the provider (ie create/destroy a machine), because the PA
      needs to use credentials stored in the environment config to do so.

    * On any command which uses constraints-related code (even if
      indirectly) because Constraints objects are provider-specific, and
      need to be created with the help of a MachineProvider; and the only
      way state code can get a MachineProvider is by getting one from ZK
      (we certainly don't want to thread the relevant provider from
      juju.control and/or the PA itself all the way through the state
      code). So, we sync, to ensure that state code can use an
      EnvironmentStateManager to get a provider.
    """
    esm = EnvironmentStateManager(client)
    return esm.set_config_state(config, name)
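A sketch of the calling pattern, mirroring constraints_set above and constraints_get below; the helper name is hypothetical:

@inlineCallbacks
def read_env_constraints(provider, env_config, environment):
    # Hypothetical helper: connect, push the local config, then use the
    # state managers against the synced environment.
    client = yield provider.connect()
    try:
        yield sync_environment_state(client, env_config, environment.name)
        esm = EnvironmentStateManager(client)
        constraints = yield esm.get_constraints()
        returnValue(dict(constraints))
    finally:
        yield client.close()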
def test_initialize(self):
    yield self.layout.initialize()

    yield self.assert_existence_and_acl("/charms")
    yield self.assert_existence_and_acl("/services")
    yield self.assert_existence_and_acl("/units")
    yield self.assert_existence_and_acl("/machines")
    yield self.assert_existence_and_acl("/relations")
    yield self.assert_existence_and_acl("/initialized")

    # To check that the constraints landed correctly, we need the
    # environment config to have been sent, or we won't be able to
    # get a provider to help us construct the appropriate objects.
    yield self.push_default_config(with_constraints=False)

    esm = EnvironmentStateManager(self.client)
    env_constraints = yield esm.get_constraints()
    self.assertEquals(env_constraints, {
        "provider-type": "dummy",
        "ubuntu-series": None,
        "arch": "arm",
        "cpu": None,
        "mem": 512})

    machine_state_manager = MachineStateManager(self.client)
    machine_state = yield machine_state_manager.get_machine_state(0)
    machine_constraints = yield machine_state.get_constraints()
    self.assertTrue(machine_constraints.complete)
    self.assertEquals(machine_constraints, {
        "provider-type": "dummy",
        "ubuntu-series": "cranky",
        "arch": "arm",
        "cpu": None,
        "mem": 512})
    instance_id = yield machine_state.get_instance_id()
    self.assertEqual(instance_id, "i-abcdef")

    settings_manager = GlobalSettingsStateManager(self.client)
    self.assertEqual((yield settings_manager.get_provider_type()), "dummy")
    self.assertEqual(
        self.log.getvalue().strip(),
        "Initializing zookeeper hierarchy")
def setUp(self):
    yield super(ConstraintsGetTest, self).setUp()
    env_constraints = dummy_cs.parse(["mem=1024"])
    esm = EnvironmentStateManager(self.client)
    yield esm.set_constraints(env_constraints)
    self.expect_env = {
        "arch": "amd64", "cpu": 1.0, "mem": 1024.0,
        "provider-type": "dummy", "ubuntu-series": None}

    service_constraints = dummy_cs.parse(["cpu=10"])
    service = yield self.add_service_from_charm(
        "mysql", constraints=service_constraints)
    # unit will snapshot the state of service when added
    unit = yield service.add_unit_state()
    self.expect_unit = {
        "arch": "amd64", "cpu": 10.0, "mem": 1024.0,
        "provider-type": "dummy", "ubuntu-series": "series"}

    # machine gets its own constraints
    machine_constraints = dummy_cs.parse(["cpu=15", "mem=8G"])
    machine = yield self.add_machine_state(
        constraints=machine_constraints.with_series("series"))
    self.expect_machine = {
        "arch": "amd64", "cpu": 15.0, "mem": 8192.0,
        "provider-type": "dummy", "ubuntu-series": "series"}
    yield unit.assign_to_machine(machine)

    # service gets new constraints, leaves unit untouched
    yield service.set_constraints(dummy_cs.parse(["mem=16G"]))
    self.expect_service = {
        "arch": "amd64", "cpu": 1.0, "mem": 16384.0,
        "provider-type": "dummy", "ubuntu-series": "series"}

    self.log = self.capture_logging()
    self.stdout = self.capture_stream("stdout")
    self.finished = self.setup_cli_reactor()
    self.setup_exit(0)
    self.mocker.replay()
def constraints_get(env_config, environment, entity_names, log):
    """
    Show the complete set of applicable constraints for each specified
    entity.

    This will show the final computed values of all constraints (including
    internal constraints which cannot be set directly via set-constraints).
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    result = {}
    try:
        yield sync_environment_state(client, env_config, environment.name)
        if entity_names:
            msm = MachineStateManager(client)
            ssm = ServiceStateManager(client)
            for name in entity_names:
                if name.isdigit():
                    kind = "machine"
                    entity = yield msm.get_machine_state(name)
                elif "/" in name:
                    kind = "service unit"
                    entity = yield ssm.get_unit_state(name)
                else:
                    kind = "service"
                    entity = yield ssm.get_service_state(name)
                log.info("Fetching constraints for %s %s", kind, name)
                constraints = yield entity.get_constraints()
                result[name] = dict(constraints)
        else:
            esm = EnvironmentStateManager(client)
            log.info("Fetching constraints for environment")
            constraints = yield esm.get_constraints()
            result = dict(constraints)
        yaml.safe_dump(result, sys.stdout)
    finally:
        yield client.close()
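The dumped result is just a name-to-constraints mapping; a sketch of its shape, with values borrowed from the ConstraintsGetTest fixture earlier in this section (an assumed illustration, not captured output):

# Sketch only: what `result` might hold for a single unit before safe_dump.
result = {
    "mysql/0": {"arch": "amd64", "cpu": 10.0, "mem": 1024.0,
                "provider-type": "dummy", "ubuntu-series": "series"}}
yaml.safe_dump(result, sys.stdout)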
class EnvironmentStateManagerTest(StateTestBase, EnvironmentsConfigTestBase):

    @inlineCallbacks
    def setUp(self):
        yield super(EnvironmentStateManagerTest, self).setUp()
        self.environment_state_manager = EnvironmentStateManager(self.client)
        self.write_config(SAMPLE_ENV)
        self.config.load()

    @inlineCallbacks
    def tearDown(self):
        yield super(EnvironmentStateManagerTest, self).tearDown()

    @inlineCallbacks
    def test_set_config_state(self):
        """
        The simplest thing the manager can do is serialize a given
        environment and save it in zookeeper.
        """
        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        serialized = self.config.serialize("myfirstenv")
        content, stat = yield self.client.get("/environment")
        self.assertEquals(yaml.load(content), yaml.load(serialized))

    @inlineCallbacks
    def test_set_config_state_replaces_environment(self):
        """
        Setting the environment should also work with an existing
        environment.
        """
        yield self.client.create("/environment", "Replace me!")
        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        serialized = self.config.serialize("myfirstenv")
        content, stat = yield self.client.get("/environment")
        self.assertEquals(yaml.load(content), yaml.load(serialized))

    @inlineCallbacks
    def test_get_config(self):
        """
        We can also retrieve a loaded config from the environment.
        """
        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        config = yield manager.get_config()
        serialized1 = self.config.serialize("myfirstenv")
        serialized2 = config.serialize("myfirstenv")
        self.assertEquals(yaml.load(serialized1), yaml.load(serialized2))

    def test_get_config_when_missing(self):
        """
        get_config should blow up politely if the environment config is
        missing.
        """
        d = self.environment_state_manager.get_config()
        return self.assertFailure(d, EnvironmentStateNotFound)

    @inlineCallbacks
    def test_wait_for_config_pre_existing(self):
        """
        wait_for_config() should return the environment immediately if it
        already exists.
        """
        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        config = yield manager.wait_for_config()
        serialized1 = self.config.serialize("myfirstenv")
        serialized2 = config.serialize("myfirstenv")
        self.assertEquals(yaml.load(serialized1), yaml.load(serialized2))

    @inlineCallbacks
    def test_wait_for_config_pre_creation(self):
        """
        wait_for_config() should wait until the environment configuration
        is made available, and return it.
        """
        manager = self.environment_state_manager
        d = manager.wait_for_config()
        yield manager.set_config_state(self.config, "myfirstenv")
        config = yield d
        serialized1 = self.config.serialize("myfirstenv")
        serialized2 = config.serialize("myfirstenv")
        self.assertEquals(yaml.load(serialized1), yaml.load(serialized2))

    @inlineCallbacks
    def test_wait_for_config_retries_on_race(self):
        """
        If the config seems to exist, but then goes away, try again.
        """
        mock_manager = self.mocker.patch(self.environment_state_manager)
        mock_manager.get_config()
        self.mocker.throw(EnvironmentStateNotFound())
        mock_manager.get_config()
        self.mocker.passthrough()
        self.mocker.replay()

        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        config = yield manager.wait_for_config()
        serialized1 = self.config.serialize("myfirstenv")
        serialized2 = config.serialize("myfirstenv")
        self.assertEquals(yaml.load(serialized1), yaml.load(serialized2))
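The wait_for_config() behaviour exercised above is what lets a long-running agent block until a CLI command has pushed /environment; a minimal sketch, with the helper name hypothetical:

@inlineCallbacks
def wait_for_provider(client):
    # Hypothetical helper: block until the environment config exists in
    # zookeeper, then build a provider from it.
    esm = EnvironmentStateManager(client)
    config = yield esm.wait_for_config()
    environment = config.get_default()
    returnValue(environment.get_machine_provider())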
def push_env_constraints(self, *constraint_strs):
    esm = EnvironmentStateManager(self.client)
    constraint_set = yield esm.get_constraint_set()
    yield esm.set_constraints(constraint_set.parse(constraint_strs))
def push_config(self, name, config):
    self.write_config(yaml.dump(config))
    self.config.load()
    esm = EnvironmentStateManager(self.client)
    return esm.set_config_state(self.config, name)
class EnvironmentStateManagerTest(StateTestBase, EnvironmentsConfigTestBase):

    @inlineCallbacks
    def setUp(self):
        yield super(EnvironmentStateManagerTest, self).setUp()
        self.environment_state_manager = EnvironmentStateManager(self.client)
        self.write_config(SAMPLE_ENV)
        self.config.load()

    @inlineCallbacks
    def tearDown(self):
        yield super(EnvironmentStateManagerTest, self).tearDown()

    @inlineCallbacks
    def test_set_config_state(self):
        """
        The simplest thing the manager can do is serialize a given
        environment and save it in zookeeper.
        """
        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        serialized = self.config.serialize("myfirstenv")
        content, stat = yield self.client.get("/environment")
        self.assertEquals(yaml.load(content), yaml.load(serialized))

    @inlineCallbacks
    def test_set_config_state_replaces_environment(self):
        """
        Setting the environment should also work with an existing
        environment.
        """
        yield self.client.create("/environment", "Replace me!")
        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        serialized = self.config.serialize("myfirstenv")
        content, stat = yield self.client.get("/environment")
        self.assertEquals(yaml.load(content), yaml.load(serialized))

    @inlineCallbacks
    def test_get_config(self):
        """
        We can also retrieve a loaded config from the environment.
        """
        manager = self.environment_state_manager
        yield manager.set_config_state(self.config, "myfirstenv")
        config = yield manager.get_config()
        serialized1 = self.config.serialize("myfirstenv")
        serialized2 = config.serialize("myfirstenv")
        self.assertEquals(yaml.load(serialized1), yaml.load(serialized2))

    def test_get_config_when_missing(self):
        """
        get_config should blow up politely if the environment config is
        missing.
        """
        d = self.environment_state_manager.get_config()
        return self.assertFailure(d, EnvironmentStateNotFound)

    @inlineCallbacks
    def test_get_in_legacy_environment_no(self):
        yield self.push_default_config()
        esm = self.environment_state_manager
        legacy = yield esm.get_in_legacy_environment()
        self.assertEquals(legacy, False)

    @inlineCallbacks
    def test_get_in_legacy_environment_yes(self):
        yield self.push_default_config()
        yield self.client.delete("/constraints")
        esm = self.environment_state_manager
        legacy = yield esm.get_in_legacy_environment()
        self.assertEquals(legacy, True)

    def test_get_constraint_set_no_env(self):
        d = self.environment_state_manager.get_constraint_set()
        return self.assertFailure(d, EnvironmentStateNotFound)

    @inlineCallbacks
    def test_get_constraint_set(self):
        yield self.push_default_config()
        cs = yield self.environment_state_manager.get_constraint_set()
        constraints = cs.parse(["arch=any", "cpu=10"])
        self.assertEquals(constraints, {
            "ubuntu-series": None,
            "provider-type": "dummy",
            "arch": None,
            "cpu": 10.0,
            "mem": 512.0})

    def test_get_constraints_no_env(self):
        d = self.environment_state_manager.get_constraints()
        return self.assertFailure(d, EnvironmentStateNotFound)

    @inlineCallbacks
    def test_get_constraints_env_with_no_node(self):
        yield self.push_default_config()
        yield self.client.delete("/constraints")
        constraints = yield self.environment_state_manager.get_constraints()
        self.assertEquals(constraints.data, {})

    @inlineCallbacks
    def test_set_constraints(self):
        yield self.push_default_config()
        constraints = dummy_cs.parse(["cpu=any", "mem=32T"])
        yield self.environment_state_manager.set_constraints(constraints)
        roundtrip = yield self.environment_state_manager.get_constraints()
        self.assertEquals(roundtrip, constraints)
def deploy(env_config, environment, repository_path, charm_name,
           service_name, log, config_file=None, num_units=1):
    """Deploy a charm within an environment.

    This will publish the charm to the environment, create a service from
    the charm, and set it up to be launched on a new machine.
    """
    repo, charm_url = resolve(
        charm_name, repository_path, environment.default_series)

    # Validate config options prior to deployment attempt
    service_options = {}
    service_name = service_name or charm_url.name
    if config_file:
        service_options = parse_config_options(config_file, service_name)

    charm = yield repo.find(charm_url)
    charm_id = str(charm_url.with_revision(charm.get_revision()))

    provider = environment.get_machine_provider()
    placement_policy = provider.get_placement_policy()
    client = yield provider.connect()

    try:
        storage = yield provider.get_file_storage()
        service_manager = ServiceStateManager(client)
        environment_state_manager = EnvironmentStateManager(client)
        yield environment_state_manager.set_config_state(
            env_config, environment.name)

        # Publish the charm to juju
        publisher = CharmPublisher(client, storage)
        yield publisher.add_charm(charm_id, charm)
        result = yield publisher.publish()

        # In future we might have multiple charms be published at
        # the same time. For now, extract the charm_state from the list.
        charm_state = result[0]

        # Create the service state
        service_state = yield service_manager.add_service_state(
            service_name, charm_state)

        # Use the charm's ConfigOptions instance to validate service
        # options. Invalid options passed will throw an exception
        # and prevent the deploy.
        state = yield service_state.get_config()
        charm_config = yield charm_state.get_config()
        # return the validated options with the defaults included
        service_options = charm_config.validate(service_options)
        state.update(service_options)
        yield state.write()

        # Create desired number of service units
        for i in xrange(num_units):
            unit_state = yield service_state.add_unit_state()
            yield place_unit(client, placement_policy, unit_state)

        # Check if we have any peer relations to establish
        if charm.metadata.peers:
            relation_manager = RelationStateManager(client)
            for peer_name, peer_info in charm.metadata.peers.items():
                yield relation_manager.add_relation_state(
                    RelationEndpoint(service_name, peer_info["interface"],
                                     peer_name, "peer"))

        log.info("Charm deployed as service: %r", service_name)
    finally:
        yield client.close()
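A minimal usage sketch, mirroring test_deploy_sends_environment earlier in this section; the wrapper name and the `config` and `repo_path` arguments are assumed to be supplied by the caller:

@inlineCallbacks
def deploy_sample(config, repo_path):
    # Hypothetical wrapper: deploy a local charm into the "firstenv"
    # environment, letting the charm name double as the service name.
    environment = config.get("firstenv")
    yield deploy(
        config, environment, repo_path, "local:sample", None,
        logging.getLogger("deploy"))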