def download_charm(client, charm_id, charms_directory):
    """Retrieve a charm from the provider storage to the local machine.

    :param client: a connected zookeeper client.
    :param str charm_id: id of the charm to download.
    :param str charms_directory: local directory to store the bundle in.

    Returns (via the deferred) a `CharmBundle` wrapping the local file.
    Raises `FileNotFound` if the charm's bundle URL cannot be read.
    """
    charm_state_manager = CharmStateManager(client)
    charm_state = yield charm_state_manager.get_charm_state(charm_id)

    # Calculate local charm path.  The key embeds the checksum so that
    # different contents for the same charm id never collide on disk.
    checksum = yield charm_state.get_sha256()
    charm_key = under.quote("%s:%s" % (charm_state.id, checksum))
    local_charm_path = os.path.join(
        charms_directory, charm_key)

    # Retrieve charm from provider storage link.
    if charm_state.bundle_url.startswith("file://"):
        file_path = charm_state.bundle_url[len("file://"):]
        if not os.path.exists(file_path):
            raise FileNotFound(charm_state.bundle_url)
        # Copy in binary mode with deterministic close; the original
        # opened both files in text mode and never closed them.
        with open(file_path, "rb") as source:
            with open(local_charm_path, "wb") as target:
                shutil.copyfileobj(source, target)
    else:
        try:
            yield downloadPage(charm_state.bundle_url, local_charm_path)
        except Error:
            raise FileNotFound(charm_state.bundle_url)

    returnValue(CharmBundle(local_charm_path))
def setUp(self):
    """Set up state managers and register a baseline charm state."""
    yield super(MachineStateManagerTest, self).setUp()
    self.charm_state_manager = CharmStateManager(self.client)
    self.machine_state_manager = MachineStateManager(self.client)
    self.service_state_manager = ServiceStateManager(self.client)
    # Register the test charm (empty bundle URL) so services can be
    # created against it in the tests.
    self.charm_state = yield self.charm_state_manager.add_charm_state(
        local_charm_id(self.charm), self.charm, "")
def validate_hooks(client, unit_state, hook_names):
    """Verify each name in `hook_names` is a valid hook for the unit's charm.

    Valid hooks are the lifecycle hooks plus the four relation hooks
    (joined/changed/departed/broken) for every relation endpoint of the
    unit's service.

    Returns True (via the deferred) when all names are valid; raises
    `InvalidCharmHook` naming the first invalid hook otherwise.
    """
    # Assemble the set of valid hooks for the charm.
    valid_hooks = set(["start", "stop", "install", "config-changed"])
    service_manager = ServiceStateManager(client)
    endpoints = yield service_manager.get_relation_endpoints(
        unit_state.service_name)
    for endpoint in endpoints:
        endpoint_name = endpoint.relation_name
        valid_hooks.update([
            endpoint_name + "-relation-joined",
            endpoint_name + "-relation-changed",
            endpoint_name + "-relation-departed",
            endpoint_name + "-relation-broken",
        ])

    # Verify the hook names; on break, `hook_name` stays bound to the
    # first invalid hook for the error path below.
    for hook_name in hook_names:
        if hook_name in valid_hooks:
            continue
        break
    else:
        returnValue(True)

    # We dereference to the charm to give a fully qualified error
    # message.  Reuse the service manager created above (the original
    # constructed a redundant second instance here).
    service_state = yield service_manager.get_service_state(
        unit_state.service_name)
    charm_id = yield service_state.get_charm_id()
    charm_manager = CharmStateManager(client)
    charm = yield charm_manager.get_charm_state(charm_id)
    raise InvalidCharmHook(charm.id, hook_name)
def setUp(self):
    """Set up a charm state manager and an unbundled charm repository."""
    yield super(CharmStateManagerTest, self).setUp()
    self.charm_state_manager = CharmStateManager(self.client)
    self.charm_id = local_charm_id(self.charm)
    # shutil.copytree requires that its destination not exist yet, so
    # reserve a unique temp path with makeDir, remove it, then copy the
    # sample repository into place.
    self.unbundled_repo_path = self.makeDir()
    os.rmdir(self.unbundled_repo_path)
    shutil.copytree(unbundled_repository, self.unbundled_repo_path)
def __init__(self, client, machine_id, juju_directory):
    """Create a unit deployer.

    :param client: A connected zookeeper client.
    :param str machine_id: the ID of the machine the agent is being run on.
    :param str juju_directory: the directory the agent is running in.
    """
    self.client = client
    self.machine_id = machine_id
    self.juju_directory = juju_directory
    # State managers used over the deployer's lifetime.
    self.charm_state_manager = CharmStateManager(client)
    self.service_state_manager = ServiceStateManager(client)
def download_charm(client, charm_id, charms_directory):
    """Fetch the bundle for `charm_id` from provider storage into
    `charms_directory` and return a `CharmBundle` for the local file.

    Raises `FileNotFound` when the bundle URL cannot be read.
    """
    charm_state_manager = CharmStateManager(client)
    charm_state = yield charm_state_manager.get_charm_state(charm_id)

    # Calculate local charm path; keyed by id + checksum so distinct
    # contents for the same charm id land in distinct files.
    checksum = yield charm_state.get_sha256()
    charm_key = under.quote("%s:%s" % (charm_state.id, checksum))
    local_charm_path = os.path.join(charms_directory, charm_key)

    # Retrieve charm from provider storage link.
    if charm_state.bundle_url.startswith("file://"):
        file_path = charm_state.bundle_url[len("file://"):]
        if not os.path.exists(file_path):
            raise FileNotFound(charm_state.bundle_url)
        # Binary-mode copy with deterministic close; the original leaked
        # both file objects and opened them in text mode.
        with open(file_path, "rb") as source:
            with open(local_charm_path, "wb") as target:
                shutil.copyfileobj(source, target)
    else:
        try:
            yield downloadPage(charm_state.bundle_url, local_charm_path)
        except Error:
            raise FileNotFound(charm_state.bundle_url)

    returnValue(CharmBundle(local_charm_path))
def start(self):
    """Start the machine agent.

    Prepares the agent's on-disk directories, loads the machine state,
    optionally begins watching assigned units, and announces the
    agent's presence to the environment.
    """
    # Ensure every working directory exists before anything else runs.
    for directory in (self.charms_directory, self.units_directory,
                      self.unit_state_directory):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # State managers used for the rest of the agent's lifetime.
    self.service_state_manager = ServiceStateManager(self.client)
    self.charm_state_manager = CharmStateManager(self.client)

    # Load the state node for the machine this agent represents.
    machine_manager = MachineStateManager(self.client)
    self.machine_state = yield machine_manager.get_machine_state(
        self.get_machine_id())

    # Observe unit assignment changes, when watching is enabled.
    if self.get_watch_enabled():
        self.machine_state.watch_assigned_units(self.watch_service_units)

    # Determine the provider and the matching unit deployment strategy.
    settings = GlobalSettingsStateManager(self.client)
    self.provider_type = yield settings.get_provider_type()
    self.deploy_factory = get_deploy_factory(self.provider_type)

    # Broadcast presence to the world.
    yield self.machine_state.connect_agent()
    log.info(
        "Machine agent started id:%s deploy:%r provider:%r"
        % (self.get_machine_id(), self.deploy_factory, self.provider_type))
def test_deploy_upgrade(self):
    """A charm can be deployed and get the latest version"""
    environment = self.config.get("firstenv")
    deploy_log = logging.getLogger("deploy")

    # Deploy once normally, then a second service with upgrade=True.
    yield deploy.deploy(
        self.config, environment, self.unbundled_repo_path,
        "local:sample", "myblog", deploy_log, [])
    yield deploy.deploy(
        self.config, environment, self.unbundled_repo_path,
        "local:sample", "myblog2", deploy_log, [], upgrade=True)

    # The two services must reference different charm ids.
    services = ServiceStateManager(self.client)
    charm_ids = []
    for service_name in ("myblog", "myblog2"):
        service = yield services.get_service_state(service_name)
        charm_ids.append((yield service.get_charm_id()))
    s1_charm_id, s2_charm_id = charm_ids
    self.assertNotEqual(s1_charm_id, s2_charm_id)

    # The upgraded deployment must carry the next charm revision.
    charms = CharmStateManager(self.client)
    charm1 = yield charms.get_charm_state(s1_charm_id)
    charm2 = yield charms.get_charm_state(s2_charm_id)
    self.assertEqual(charm1.revision + 1, charm2.revision)
def __init__(self, client, provider, log):
    """Build a callable status command object.

    `client`: ZK client connection
    `provider`: machine provider for the environment
    `log`: a Python stdlib logger.
    """
    self.client = client
    self.provider = provider
    self.log = log
    # One state manager per state family that status reports on.
    self.charm_manager = CharmStateManager(client)
    self.machine_manager = MachineStateManager(client)
    self.relation_manager = RelationStateManager(client)
    self.service_manager = ServiceStateManager(client)
    self._reset()
def start(self):
    """Start the machine agent.

    Creates the agent's local state directories, retrieves the machine
    state, optionally enables the assigned-units watch, and connects
    the agent.
    """
    # Create any missing working directories up front.
    for path in (self.charms_directory,
                 self.units_directory,
                 self.unit_state_directory):
        if not os.path.exists(path):
            os.makedirs(path)

    # Managers used throughout the agent's lifetime.
    self.service_state_manager = ServiceStateManager(self.client)
    self.charm_state_manager = CharmStateManager(self.client)

    # Fetch the state of the machine this agent represents.
    machine_manager = MachineStateManager(self.client)
    self.machine_state = yield machine_manager.get_machine_state(
        self.get_machine_id())

    # Watch assigned units for the machine, when enabled.
    if self.get_watch_enabled():
        self.machine_state.watch_assigned_units(
            self.watch_service_units)

    # Discover the provider and the deployment strategy it implies.
    settings = GlobalSettingsStateManager(self.client)
    self.provider_type = yield settings.get_provider_type()
    self.deploy_factory = get_deploy_factory(self.provider_type)

    # Announce the agent's presence to the environment.
    yield self.machine_state.connect_agent()
    log.info("Machine agent started id:%s deploy:%r provider:%r" % (
        self.get_machine_id(), self.deploy_factory, self.provider_type))
class CharmPublisher(object):
    """Publishes a charm to an environment."""

    def __init__(self, client, storage):
        self._client = client
        self._storage = storage
        self._charm_state_manager = CharmStateManager(self._client)
        # Charms queued for publication, and a cache of charm states
        # already known to exist in zookeeper.
        self._charm_add_queue = []
        self._charm_state_cache = {}

    @classmethod
    @inlineCallbacks
    def for_environment(cls, environment):
        """Construct a publisher connected to `environment`'s provider."""
        provider = environment.get_machine_provider()
        storage = provider.get_file_storage()
        client = yield provider.connect()
        returnValue(cls(client, storage))

    @inlineCallbacks
    def add_charm(self, charm_id, charm):
        """Schedule a charm for addition to an juju environment.

        Returns true if the charm is scheduled for upload, false if
        the charm is already present in juju.
        """
        self._charm_add_queue.append((charm_id, charm))
        if charm_id in self._charm_state_cache:
            returnValue(False)
        try:
            state = yield self._charm_state_manager.get_charm_state(
                charm_id)
        except CharmStateNotFound:
            pass
        else:
            log.info("Using cached charm version of %s" % charm.metadata.name)
            self._charm_state_cache[charm_id] = state
            returnValue(False)
        returnValue(True)

    def _publish_one(self, charm_id, charm):
        # Upload one charm bundle to provider storage and record its
        # state in zookeeper, short-circuiting on a cache hit.
        if charm_id in self._charm_state_cache:
            return succeed(self._charm_state_cache[charm_id])

        bundle = charm.as_bundle()
        charm_file = open(bundle.path, "rb")
        charm_store_path = under.quote(
            "%s:%s" % (charm_id, bundle.get_sha256()))

        def close_charm_file(passthrough):
            charm_file.close()
            return passthrough

        def get_charm_url(result):
            return self._storage.get_url(charm_store_path)

        d = self._storage.put(charm_store_path, charm_file)
        d.addBoth(close_charm_file)
        d.addCallback(get_charm_url)
        d.addCallback(self._cb_store_charm_state, charm_id, bundle)
        d.addErrback(self._eb_verify_duplicate, charm_id, bundle)
        return d

    def publish(self):
        """Publish all added charms to provider storage and zookeeper.

        Returns the charm_state of all scheduled charms.
        """
        publish_deferreds = []
        for charm_id, charm in self._charm_add_queue:
            publish_deferreds.append(self._publish_one(charm_id, charm))

        publish_deferred = DeferredList(publish_deferreds,
                                        fireOnOneErrback=1,
                                        consumeErrors=1)
        # callbacks and deferreds to unwind the dlist
        publish_deferred.addCallback(self._cb_extract_charm_state)
        publish_deferred.addErrback(self._eb_extract_error)
        return publish_deferred

    def _cb_extract_charm_state(self, result):
        # DeferredList yields (success, value) pairs; keep the values.
        return [r[1] for r in result]

    def _eb_extract_error(self, failure):
        # Unwrap the FirstError produced by fireOnOneErrback.
        failure.trap(FirstError)
        return failure.value.subFailure

    def _cb_store_charm_state(self, charm_url, charm_id, charm):
        return self._charm_state_manager.add_charm_state(
            charm_id, charm, charm_url)

    @inlineCallbacks
    def _eb_verify_duplicate(self, failure, charm_id, charm):
        """Detects duplicates vs. conflicts, raises stateerror on conflict."""
        failure.trap(NodeExistsException)

        try:
            charm_state = \
                yield self._charm_state_manager.get_charm_state(charm_id)
        except NoNodeException:
            # Check if the state goes away due to concurrent removal
            msg = "Charm removed concurrently during publish, please retry."
            raise StateChanged(msg)

        # Bug fix: charm_state.get_sha256() returns a deferred, so the
        # original compared the deferred object itself to the bundle's
        # checksum string -- never equal, so every duplicate publish
        # was reported as a conflict.  Resolve it before comparing.
        state_sha256 = yield charm_state.get_sha256()
        if state_sha256 != charm.get_sha256():
            msg = "Concurrent upload of charm has different checksum %s" % (
                charm_id)
            raise StateChanged(msg)
def setUp(self):
    """Create a charm state manager and compute the test charm's id."""
    yield super(CharmStateManagerTest, self).setUp()
    self.charm_state_manager = CharmStateManager(self.client)
    self.charm_id = local_charm_id(self.charm)
class CharmStateManagerTest(StateTestBase): @inlineCallbacks def setUp(self): yield super(CharmStateManagerTest, self).setUp() self.charm_state_manager = CharmStateManager(self.client) self.charm_id = local_charm_id(self.charm) @inlineCallbacks def test_add_charm(self): """ Adding a Charm into a CharmStateManager should register the charm within the Zookeeper state, according to the specification. """ charm_state = yield self.charm_state_manager.add_charm_state( self.charm_id, self.charm, "http://example.com/abc") self.assertEquals(charm_state.id, "local:series/dummy-1") children = yield self.client.get_children("/charms") self.assertEquals(children, ["local_3a_series_2f_dummy-1"]) content, stat = yield self.client.get( "/charms/local_3a_series_2f_dummy-1") charm_data = yaml.load(content) self.assertEquals(charm_data, { "metadata": self.charm.metadata.get_serialization_data(), "config": self.charm.config.get_serialization_data(), "sha256": self.charm.get_sha256(), "url": "http://example.com/abc" }) @inlineCallbacks def test_get_charm(self): """ A CharmState should be available if one get()s a charm that was previously added into the manager. """ yield self.charm_state_manager.add_charm_state( self.charm_id, self.charm, "") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") self.assertEquals(charm_state.id, "local:series/dummy-1") @inlineCallbacks def test_charm_state_attributes(self): """ Verify that the basic (invariant) attributes of the CharmState are correctly in place. 
""" yield self.charm_state_manager.add_charm_state( self.charm_id, self.charm, "http://example.com/abc") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") self.assertEquals(charm_state.name, "dummy") self.assertEquals(charm_state.revision, 1) self.assertEquals(charm_state.id, "local:series/dummy-1") self.assertEquals(charm_state.bundle_url, "http://example.com/abc") @inlineCallbacks def test_charm_state_metadata(self): """ Check that the charm metadata was correctly saved and loaded. """ yield self.charm_state_manager.add_charm_state( self.charm_id, self.charm, "") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") metadata = yield charm_state.get_metadata() self.assertEquals(metadata.name, "dummy") @inlineCallbacks def test_charm_state_config_options(self): """Verify ConfigOptions present and correct.""" from juju.charm.tests.test_config import sample_yaml_data yield self.charm_state_manager.add_charm_state( self.charm_id, self.charm, "") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") config = yield charm_state.get_config() self.assertEquals(config.get_serialization_data(), sample_yaml_data) @inlineCallbacks def test_get_non_existing_charm_prior_to_initialization(self): """ Getting a charm before the charms node was even initialized should raise an error about the charm not being present. """ try: yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") except CharmStateNotFound, e: self.assertEquals(e.charm_id, "local:series/dummy-1") else:
class CharmStateManagerTest(StateTestBase): @inlineCallbacks def setUp(self): yield super(CharmStateManagerTest, self).setUp() self.charm_state_manager = CharmStateManager(self.client) self.charm_id = local_charm_id(self.charm) @inlineCallbacks def test_add_charm(self): """ Adding a Charm into a CharmStateManager should register the charm within the Zookeeper state, according to the specification. """ charm_state = yield self.charm_state_manager.add_charm_state( self.charm_id, self.charm, "http://example.com/abc") self.assertEquals(charm_state.id, "local:series/dummy-1") children = yield self.client.get_children("/charms") self.assertEquals(children, ["local_3a_series_2f_dummy-1"]) content, stat = yield self.client.get( "/charms/local_3a_series_2f_dummy-1") charm_data = yaml.load(content) self.assertEquals( charm_data, { "metadata": self.charm.metadata.get_serialization_data(), "config": self.charm.config.get_serialization_data(), "sha256": self.charm.get_sha256(), "url": "http://example.com/abc" }) @inlineCallbacks def test_get_charm(self): """ A CharmState should be available if one get()s a charm that was previously added into the manager. """ yield self.charm_state_manager.add_charm_state(self.charm_id, self.charm, "") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") self.assertEquals(charm_state.id, "local:series/dummy-1") @inlineCallbacks def test_charm_state_attributes(self): """ Verify that the basic (invariant) attributes of the CharmState are correctly in place. 
""" yield self.charm_state_manager.add_charm_state( self.charm_id, self.charm, "http://example.com/abc") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") self.assertEquals(charm_state.name, "dummy") self.assertEquals(charm_state.revision, 1) self.assertEquals(charm_state.id, "local:series/dummy-1") self.assertEquals(charm_state.bundle_url, "http://example.com/abc") @inlineCallbacks def test_charm_state_metadata(self): """ Check that the charm metadata was correctly saved and loaded. """ yield self.charm_state_manager.add_charm_state(self.charm_id, self.charm, "") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") metadata = yield charm_state.get_metadata() self.assertEquals(metadata.name, "dummy") @inlineCallbacks def test_charm_state_config_options(self): """Verify ConfigOptions present and correct.""" from juju.charm.tests.test_config import sample_yaml_data yield self.charm_state_manager.add_charm_state(self.charm_id, self.charm, "") charm_state = yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") config = yield charm_state.get_config() self.assertEquals(config.get_serialization_data(), sample_yaml_data) @inlineCallbacks def test_get_non_existing_charm_prior_to_initialization(self): """ Getting a charm before the charms node was even initialized should raise an error about the charm not being present. """ try: yield self.charm_state_manager.get_charm_state( "local:series/dummy-1") except CharmStateNotFound, e: self.assertEquals(e.charm_id, "local:series/dummy-1") else:
def __init__(self, client, storage):
    """Initialize the publisher with zookeeper and storage handles."""
    self._client = client
    self._storage = storage
    self._charm_state_manager = CharmStateManager(client)
    # Charms queued for publication and a cache of known charm states.
    self._charm_add_queue = []
    self._charm_state_cache = {}
def collect(scope, machine_provider, client, log):
    """Extract status information into nested dicts for rendering.

    `scope`: an optional list of name specifiers. Globbing based
       wildcards supported. Defaults to all units, services and
       relations.

    `machine_provider`: machine provider for the environment

    `client`: ZK client connection

    `log`: a Python stdlib logger.
    """
    service_manager = ServiceStateManager(client)
    relation_manager = RelationStateManager(client)
    machine_manager = MachineStateManager(client)
    charm_manager = CharmStateManager(client)

    # Output structure: {"services": {...}, "machines": {...}}.
    service_data = {}
    machine_data = {}
    state = dict(services=service_data, machines=machine_data)

    # Machines referenced by matched units; used to restrict the
    # machine section when a filter scope is in effect.
    seen_machines = set()
    filter_services, filter_units = digest_scope(scope)

    services = yield service_manager.get_all_service_states()
    for service in services:
        # Skip services that match none of the service filters.
        if len(filter_services):
            found = False
            for filter_service in filter_services:
                if fnmatch(service.service_name, filter_service):
                    found = True
                    break
            if not found:
                continue

        unit_data = {}
        relation_data = {}

        charm_id = yield service.get_charm_id()
        charm = yield charm_manager.get_charm_state(charm_id)

        service_data[service.service_name] = dict(
            units=unit_data,
            charm=charm.id,
            relations=relation_data)
        exposed = yield service.get_exposed_flag()
        if exposed:
            service_data[service.service_name].update(exposed=exposed)

        units = yield service.get_all_unit_states()
        unit_matched = False

        relations = yield relation_manager.get_relations_for_service(service)

        for unit in units:
            # Skip units that match none of the unit filters.
            if len(filter_units):
                found = False
                for filter_unit in filter_units:
                    if fnmatch(unit.unit_name, filter_unit):
                        found = True
                        break
                if not found:
                    continue

            u = unit_data[unit.unit_name] = dict()
            machine_id = yield unit.get_assigned_machine_id()
            u["machine"] = machine_id
            unit_workflow_client = WorkflowStateClient(client, unit)
            unit_state = yield unit_workflow_client.get_state()
            if not unit_state:
                u["state"] = "pending"
            else:
                # A workflow state exists; report it only while the
                # unit agent is actually connected.
                unit_connected = yield unit.has_agent()
                u["state"] = unit_state if unit_connected else "down"
            if exposed:
                open_ports = yield unit.get_open_ports()
                u["open-ports"] = [
                    "{port}/{proto}".format(**port_info)
                    for port_info in open_ports]

            # NOTE(review): placement relative to the `if exposed:`
            # block reconstructed from mangled source -- confirm
            # public-address is reported unconditionally.
            u["public-address"] = yield unit.get_public_address()

            # indicate we should include information about this
            # machine later
            seen_machines.add(machine_id)
            unit_matched = True

            # collect info on each relation for the service unit
            relation_status = {}
            for relation in relations:
                try:
                    relation_unit = yield relation.get_unit_state(unit)
                except UnitRelationStateNotFound:
                    # This exception will occur when relations are
                    # established between services without service
                    # units, and therefore never have any
                    # corresponding service relation units. This
                    # scenario does not occur in actual deployments,
                    # but can happen in test circumstances. In
                    # particular, it will happen with a misconfigured
                    # provider, which exercises this codepath.
                    continue  # should not occur, but status should not fail
                relation_workflow_client = WorkflowStateClient(
                    client, relation_unit)
                relation_workflow_state = \
                    yield relation_workflow_client.get_state()
                relation_status[relation.relation_name] = dict(
                    state=relation_workflow_state)
            u["relations"] = relation_status

        # after filtering units check if any matched or remove the
        # service from the output
        if filter_units and not unit_matched:
            del service_data[service.service_name]
            continue

        for relation in relations:
            rel_services = yield relation.get_service_states()

            # A single related service implies a peer relation. More
            # imply a bi-directional provides/requires relationship.
            # In the later case we omit the local side of the relation
            # when reporting.
            if len(rel_services) > 1:
                # Filter out self from multi-service relations.
                rel_services = [
                    rsn for rsn in rel_services
                    if rsn.service_name != service.service_name]

            if len(rel_services) > 1:
                raise ValueError("Unexpected relationship with more "
                                 "than 2 endpoints")

            rel_service = rel_services[0]
            relation_data[relation.relation_name] = rel_service.service_name

    machines = yield machine_manager.get_all_machine_states()
    for machine_state in machines:
        # When any filter is active, only report machines hosting a
        # matched unit.
        if (filter_services or filter_units) and \
                machine_state.id not in seen_machines:
            continue

        instance_id = yield machine_state.get_instance_id()
        m = {"instance-id": instance_id
             if instance_id is not None else "pending"}
        if instance_id is not None:
            try:
                pm = yield machine_provider.get_machine(instance_id)
                m["dns-name"] = pm.dns_name
                m["instance-state"] = pm.state
                if (yield machine_state.has_agent()):
                    # if the agent's connected, we're fine
                    m["state"] = "running"
                else:
                    units = (
                        yield machine_state.get_all_service_unit_states())
                    for unit in units:
                        unit_workflow_client = WorkflowStateClient(
                            client, unit)
                        if (yield unit_workflow_client.get_state()):
                            # for unit to have a state, its agent must
                            # have run, which implies the machine agent
                            # must have been running correctly at some
                            # point in the past
                            m["state"] = "down"
                            break
                    else:
                        # otherwise we're probably just still waiting
                        m["state"] = "not-started"
            except ProviderError:
                # The provider doesn't have machine information
                log.error(
                    "Machine provider information missing: machine %s" % (
                        machine_state.id))

        machine_data[machine_state.id] = m

    returnValue(state)
class MachineAgent(BaseAgent):
    """An juju Machine Agent.

    The machine agent is responsible for monitoring service units
    assigned to a machine. If a new unit is assigned to machine, the
    machine agent will download the charm, create a working space for
    the service unit agent, and then launch it.

    Additionally the machine agent will monitor the running service
    unit agents on the machine, via their ephemeral nodes, and
    restart them if they die.
    """

    name = "juju-machine-agent"
    unit_agent_module = "juju.agents.unit"

    @property
    def charms_directory(self):
        """Directory charm bundles are downloaded into."""
        return os.path.join(self.config["juju_directory"], "charms")

    @property
    def units_directory(self):
        """Directory holding per-unit working spaces."""
        return os.path.join(self.config["juju_directory"], "units")

    @property
    def unit_state_directory(self):
        """Directory holding persistent unit state."""
        return os.path.join(self.config["juju_directory"], "state")

    @inlineCallbacks
    def start(self):
        """Start the machine agent.

        Creates state directories on the machine, retrieves the machine
        state, and enables watch on assigned units.
        """
        # Initialize directory paths.
        if not os.path.exists(self.charms_directory):
            os.makedirs(self.charms_directory)
        if not os.path.exists(self.units_directory):
            os.makedirs(self.units_directory)
        if not os.path.exists(self.unit_state_directory):
            os.makedirs(self.unit_state_directory)

        # Get state managers we'll be utilizing.
        self.service_state_manager = ServiceStateManager(self.client)
        self.charm_state_manager = CharmStateManager(self.client)

        # Retrieve the machine state for the machine we represent.
        machine_manager = MachineStateManager(self.client)
        self.machine_state = yield machine_manager.get_machine_state(
            self.get_machine_id())

        # Watch assigned units for the machine.
        if self.get_watch_enabled():
            self.machine_state.watch_assigned_units(self.watch_service_units)

        # Find out what provided the machine, and how to deploy units.
        settings = GlobalSettingsStateManager(self.client)
        self.provider_type = yield settings.get_provider_type()
        self.deploy_factory = get_deploy_factory(self.provider_type)

        # Connect the machine agent, broadcasting presence to the world.
        yield self.machine_state.connect_agent()
        log.info(
            "Machine agent started id:%s deploy:%r provider:%r"
            % (self.get_machine_id(), self.deploy_factory,
               self.provider_type))

    def download_charm(self, charm_state):
        """Retrieve a charm from the provider storage to the local machine.

        Utilizes a local charm cache to avoid repeated downloading of the
        same charm.
        """
        log.debug("Downloading charm %s to %s",
                  charm_state.id, self.charms_directory)
        return download_charm(
            self.client, charm_state.id, self.charms_directory)

    @inlineCallbacks
    def watch_service_units(self, old_units, new_units):
        """Callback invoked when the assigned service units change."""
        if old_units is None:
            old_units = set()

        log.debug(
            "Units changed old:%s new:%s", old_units, new_units)

        stopped = old_units - new_units
        started = new_units - old_units

        # Errors stopping/starting one unit are logged but do not
        # prevent processing of the remaining units.
        for unit_name in stopped:
            log.debug("Stopping service unit: %s ...", unit_name)
            try:
                yield self.kill_service_unit(unit_name)
            except Exception:
                log.exception("Error stopping unit: %s", unit_name)

        for unit_name in started:
            log.debug("Starting service unit: %s ...", unit_name)
            try:
                yield self.start_service_unit(unit_name)
            except Exception:
                log.exception("Error starting unit: %s", unit_name)

    @inlineCallbacks
    def start_service_unit(self, service_unit_name):
        """Start a service unit on the machine.

        Downloads the charm, and extract it to the service unit directory,
        and launch the service unit agent within the unit directory.
        """
        # Retrieve the charm state to get at the charm.
        unit_state = yield self.service_state_manager.get_unit_state(
            service_unit_name)
        charm_id = yield unit_state.get_charm_id()
        charm_state = yield self.charm_state_manager.get_charm_state(charm_id)

        # Download the charm.
        bundle = yield self.download_charm(charm_state)

        # Use deployment to setup the workspace and start the unit agent.
        deployment = self.deploy_factory(
            service_unit_name, self.config["juju_directory"])

        # Only start the unit agent if it isn't already running.
        running = yield deployment.is_running()
        if not running:
            log.debug("Starting service unit %s", service_unit_name)
            yield deployment.start(
                self.get_machine_id(), self.client.servers, bundle)
            log.info("Started service unit %s", service_unit_name)

    def kill_service_unit(self, service_unit_name):
        """Stop service unit and destroy disk state, ala SIGKILL or
        lxc-destroy
        """
        deployment = self.deploy_factory(
            service_unit_name, self.config["juju_directory"])
        log.info("Stopping service unit %s...", service_unit_name)
        return deployment.destroy()

    def get_machine_id(self):
        """Get the id of the machine as known within the zk state."""
        return self.config["machine_id"]

    def get_agent_name(self):
        """Return the agent's display name for logging/identification."""
        return "Machine:%s" % (self.get_machine_id())

    def configure(self, options):
        """Validate configuration; a machine id must be supplied."""
        super(MachineAgent, self).configure(options)
        if not options.get("machine_id"):
            msg = ("--machine-id must be provided in the command line, "
                   "or $JUJU_MACHINE_ID in the environment")
            raise JujuError(msg)

    @classmethod
    def setup_options(cls, parser):
        """Add machine-agent CLI options, defaulting from the environment."""
        super(MachineAgent, cls).setup_options(parser)
        machine_id = os.environ.get("JUJU_MACHINE_ID", "")
        parser.add_argument("--machine-id", default=machine_id)
        return parser
class StatusCommand(object):
    """Assembles environment status (services, units, machines) into
    nested dicts suitable for rendering.

    The instance is callable; each call re-gathers state from zookeeper
    and the machine provider.
    """

    def __init__(self, client, provider, log):
        """
        Callable status command object.

        `client`: ZK client connection
        `provider`: machine provider for the environment
        `log`: a Python stdlib logger.
        """
        self.client = client
        self.provider = provider
        self.log = log

        self.service_manager = ServiceStateManager(client)
        self.relation_manager = RelationStateManager(client)
        self.machine_manager = MachineStateManager(client)
        self.charm_manager = CharmStateManager(client)
        self._reset()

    def _reset(self, scope=None):
        # init per-run state
        # self.state is assembled by the various process methods
        # intermediate access to state is made more convenient
        # using these references to its internals.
        self.service_data = {}  # service name: service info
        self.machine_data = {}  # machine id: machine state
        self.unit_data = {}  # unit_name: unit_info

        # used in collecting subordinates (which are added to state in a
        # two phase pass)
        self.subordinates = {}  # service: set(principal service names)

        self.state = dict(services=self.service_data,
                          machines=self.machine_data)

        # Filtering info
        self.seen_machines = set()
        self.filter_services, self.filter_units = digest_scope(scope)

    @inlineCallbacks
    def __call__(self, scope=None):
        """Extract status information into nested dicts for rendering.

        `scope`: an optional list of name specifiers. Globbing based
        wildcards supported. Defaults to all units, services and
        relations.
        """
        self._reset(scope)

        # Pass 1: Gather data (including principals and subordinates);
        # this builds unit info and container relationships
        # which are assembled in pass 2 below.
        yield self._process_services()

        # Pass 2: Nest information according to principal/subordinate
        # rules.
        self._process_subordinates()

        yield self._process_machines()

        returnValue(self.state)

    @inlineCallbacks
    def _process_services(self):
        """
        For each service gather the following information::

          <service name>:
            charm: <charm name>
            exposed: <expose boolean>
            relations:
                 <relation info -- see _process_relations>
            units:
                 <unit info -- see _process_units>
        """
        services = yield self.service_manager.get_all_service_states()
        for service in services:
            # Honor the service-name filter (glob patterns) when present.
            if len(self.filter_services):
                found = False
                for filter_service in self.filter_services:
                    if fnmatch(service.service_name, filter_service):
                        found = True
                        break
                if not found:
                    continue
            yield self._process_service(service)

    @inlineCallbacks
    def _process_service(self, service):
        """
        Gather the service info (described in _process_services).

        `service`: ServiceState instance
        """

        relation_data = {}
        service_data = self.service_data

        charm_id = yield service.get_charm_id()
        charm = yield self.charm_manager.get_charm_state(charm_id)

        service_data[service.service_name] = (
            dict(units={},
                 charm=charm.id,
                 relations=relation_data))

        if (yield service.is_subordinate()):
            service_data[service.service_name]["subordinate"] = True

        yield self._process_expose(service)

        relations, rel_svc_map = yield self._process_relation_map(
            service)

        unit_matched = yield self._process_units(
            service, relations, rel_svc_map)

        # after filtering units check if any matched or remove the
        # service from the output
        if self.filter_units and not unit_matched:
            del service_data[service.service_name]
            return

        yield self._process_relations(service, relations, rel_svc_map)

    @inlineCallbacks
    def _process_units(self, service, relations, rel_svc_map):
        """
        Gather unit information for a service::

            <unit name>:
                agent-state: <started|pending|etc>
                machine: <machine id>
                open-ports: ["port/protocol", ...]
                public-address: <public dns name or ip>
                subordinates:
                     <optional nested units of subordinate services>

        `service`: ServiceState instance
        `relations`: list of ServiceRelationState instance for this service
        `rel_svc_map`: maps relation internal ids to the remote endpoint
                       service name. This references the name of the remote
                       endpoint and so is generated per service.

        Returns True if at least one unit passed the unit filter.
        """
        units = yield service.get_all_unit_states()
        unit_matched = False

        for unit in units:
            # Honor the unit-name filter (glob patterns) when present.
            if len(self.filter_units):
                found = False
                for filter_unit in self.filter_units:
                    if fnmatch(unit.unit_name, filter_unit):
                        found = True
                        break
                if not found:
                    continue
            yield self._process_unit(service, unit, relations, rel_svc_map)
            unit_matched = True
        returnValue(unit_matched)

    @inlineCallbacks
    def _process_unit(self, service, unit, relations, rel_svc_map):
        """ Generate unit info for a single unit of a single service.

        `unit`: ServiceUnitState
        see `_process_units` for an explanation of other arguments.
        """
        u = self.unit_data[unit.unit_name] = dict()
        container = yield unit.get_container()
        if container:
            # Subordinate unit: remember its principal for pass 2.
            u["container"] = container.unit_name
            self.subordinates.setdefault(
                unit.service_name, set()).add(container.service_name)

        machine_id = yield unit.get_assigned_machine_id()
        u["machine"] = machine_id
        unit_workflow_client = WorkflowStateClient(self.client, unit)
        unit_state = yield unit_workflow_client.get_state()
        if not unit_state:
            # No workflow state yet: the unit agent has never run.
            u["agent-state"] = "pending"
        else:
            # Report the workflow state only while the agent is present;
            # otherwise the unit is considered down.
            unit_connected = yield unit.has_agent()
            u["agent-state"] = unit_state.replace("_", "-") \
                               if unit_connected else "down"

        exposed = self.service_data[service.service_name].get("exposed")
        if exposed:
            open_ports = yield unit.get_open_ports()
            u["open-ports"] = ["{port}/{proto}".format(**port_info)
                               for port_info in open_ports]

        u["public-address"] = yield unit.get_public_address()

        # indicate we should include information about this
        # machine later
        self.seen_machines.add(machine_id)

        # collect info on each relation for the service unit
        yield self._process_unit_relations(
            service, unit, relations, rel_svc_map)

    @inlineCallbacks
    def _process_relation_map(self, service):
        """Generate a mapping from a service's relations to the service
        name of the remote endpoints.

        returns: ([ServiceRelationState, ...], mapping)
        """
        relation_data = self.service_data[service.service_name]["relations"]
        relation_mgr = self.relation_manager
        relations = yield relation_mgr.get_relations_for_service(service)
        rel_svc_map = {}

        for relation in relations:
            rel_services = yield relation.get_service_states()

            # A single related service implies a peer relation. More
            # imply a bi-directional provides/requires relationship.
            # In the later case we omit the local side of the relation
            # when reporting.
            if len(rel_services) > 1:
                # Filter out self from multi-service relations.
                rel_services = [
                    rsn for rsn in rel_services
                    if rsn.service_name != service.service_name]

            if len(rel_services) > 1:
                raise ValueError("Unexpected relationship with more "
                                 "than 2 endpoints")

            rel_service = rel_services[0]
            relation_data.setdefault(relation.relation_name, set()).add(
                rel_service.service_name)
            rel_svc_map[relation.internal_relation_id] = (
                rel_service.service_name)

        returnValue((relations, rel_svc_map))

    @inlineCallbacks
    def _process_relations(self, service, relations, rel_svc_map):
        """Generate relation information for a given service

        Each service with relations will have a relations dict nested
        under it with one or more relations described::

          relations:
            <relation name>:
            - <remote service name>
        """
        relation_data = self.service_data[service.service_name]["relations"]

        for relation in relations:
            rel_services = yield relation.get_service_states()

            # A single related service implies a peer relation. More
            # imply a bi-directional provides/requires relationship.
            # In the later case we omit the local side of the relation
            # when reporting.
            if len(rel_services) > 1:
                # Filter out self from multi-service relations.
                rel_services = [
                    rsn for rsn in rel_services
                    if rsn.service_name != service.service_name]

            if len(rel_services) > 1:
                raise ValueError("Unexpected relationship with more "
                                 "than 2 endpoints")

            rel_service = rel_services[0]
            relation_data.setdefault(
                relation.relation_name, set()).add(
                    rel_service.service_name)
            rel_svc_map[relation.internal_relation_id] = (
                rel_service.service_name)

        # Normalize the sets back to lists
        for r in relation_data:
            relation_data[r] = sorted(relation_data[r])

    @inlineCallbacks
    def _process_unit_relations(self, service, unit, relations, rel_svc_map):
        """Collect UnitRelationState information per relation and per unit.

        Includes information under each unit for its relations including
        its relation state and information about any possible errors.

        see `_process_relations` for argument information
        """
        u = self.unit_data[unit.unit_name]
        relation_errors = {}

        for relation in relations:
            try:
                relation_unit = yield relation.get_unit_state(unit)
            except UnitRelationStateNotFound:
                # This exception will occur when relations are
                # established between services without service
                # units, and therefore never have any
                # corresponding service relation units.
                # UPDATE: common with subordinate services, and
                # some testing scenarios.
                continue
            relation_workflow_client = WorkflowStateClient(
                self.client, relation_unit)
            workflow_state = yield relation_workflow_client.get_state()
            rel_svc_name = rel_svc_map.get(relation.internal_relation_id)
            # Anything other than "up" (or not-yet-started None) is
            # reported as a relation error against the remote service.
            if rel_svc_name and workflow_state not in ("up", None):
                relation_errors.setdefault(
                    relation.relation_name, set()).add(rel_svc_name)

        if relation_errors:
            # Normalize sets and store.
            u["relation-errors"] = dict(
                [(r, sorted(relation_errors[r])) for r in relation_errors])

    def _process_subordinates(self):
        """Properly nest subordinate units under their principal service's
        unit nodes.

        Services and units are generated in one pass, then iterated
        by this method to structure the output data to reflect actual
        unit containment.

        Subordinate units will include the following::

          subordinate: true
          subordinate-to:
          - <principal service names>

        Principal services that have subordinates will include::

          subordinates:
            <subordinate unit name>:
              agent-state: <agent state>
        """
        service_data = self.service_data

        for unit_name, u in self.unit_data.iteritems():
            container = u.get("container")
            if container:
                # Nest this subordinate unit under its container unit.
                d = self.unit_data[container].setdefault("subordinates", {})
                d[unit_name] = u

                # remove keys that don't appear in output or come from
                # the container
                for key in ("container", "machine", "public-address"):
                    u.pop(key, None)
            else:
                service_name = parse_service_name(unit_name)
                service_data[service_name]["units"][unit_name] = u

        for sub_service, principal_services in self.subordinates.iteritems():
            service_data[sub_service]["subordinate-to"] = sorted(
                principal_services)
            service_data[sub_service].pop("units", None)

    @inlineCallbacks
    def _process_expose(self, service):
        """Indicate if a service is exposed or not."""
        exposed = yield service.get_exposed_flag()
        if exposed:
            self.service_data[service.service_name].update(exposed=exposed)
        returnValue(exposed)

    @inlineCallbacks
    def _process_machines(self):
        """Gather machine information.

        machines:
          <machine id>:
            agent-state: <agent state>
            dns-name: <dns name>
            instance-id: <provider specific instance id>
            instance-state: <instance state>
        """
        machines = yield self.machine_manager.get_all_machine_states()
        for machine_state in machines:
            # When filtering, only include machines hosting matched units.
            if (self.filter_services or self.filter_units) and \
                    machine_state.id not in self.seen_machines:
                continue
            yield self._process_machine(machine_state)

    @inlineCallbacks
    def _process_machine(self, machine_state):
        """
        `machine_state`: MachineState instance
        """
        instance_id = yield machine_state.get_instance_id()
        m = {"instance-id": instance_id
             if instance_id is not None else "pending"}
        if instance_id is not None:
            try:
                pm = yield self.provider.get_machine(instance_id)
                m["dns-name"] = pm.dns_name
                m["instance-state"] = pm.state
                if (yield machine_state.has_agent()):
                    # if the agent's connected, we're fine
                    m["agent-state"] = "running"
                else:
                    units = (
                        yield machine_state.get_all_service_unit_states())
                    for unit in units:
                        unit_workflow_client = WorkflowStateClient(
                            self.client, unit)
                        if (yield unit_workflow_client.get_state()):
                            # for unit to have a state, its agent must
                            # have run, which implies the machine agent
                            # must have been running correctly at some
                            # point in the past
                            m["agent-state"] = "down"
                            break
                    else:
                        # otherwise we're probably just still waiting
                        m["agent-state"] = "not-started"
            except ProviderError:
                # The provider doesn't have machine information
                self.log.error(
                    "Machine provider information missing: machine %s" % (
                        machine_state.id))

        self.machine_data[machine_state.id] = m
class CharmPublisher(object):
    """ Publishes a charm to an environment. """

    def __init__(self, client, storage):
        """
        `client`: connected zookeeper client.
        `storage`: provider file storage holding charm bundles.
        """
        self._client = client
        self._storage = storage
        self._charm_state_manager = CharmStateManager(self._client)
        # Charms queued via add_charm() awaiting publish().
        self._charm_add_queue = []
        # charm_id -> CharmState for charms already known to be present.
        self._charm_state_cache = {}

    @classmethod
    @inlineCallbacks
    def for_environment(cls, environment):
        """Return a publisher connected to `environment`'s provider."""
        provider = environment.get_machine_provider()
        storage = provider.get_file_storage()
        client = yield provider.connect()
        returnValue(cls(client, storage))

    @inlineCallbacks
    def add_charm(self, charm_id, charm):
        """Schedule a charm for addition to an juju environment.

        Returns true if the charm is scheduled for upload, false if
        the charm is already present in juju.
        """
        self._charm_add_queue.append((charm_id, charm))
        if charm_id in self._charm_state_cache:
            returnValue(False)
        try:
            state = yield self._charm_state_manager.get_charm_state(
                charm_id)
        except CharmStateNotFound:
            pass
        else:
            # Already published; remember the state so _publish_one can
            # short-circuit.
            self._charm_state_cache[charm_id] = state
            returnValue(False)
        returnValue(True)

    def _publish_one(self, charm_id, charm):
        """Publish a single charm bundle; returns a deferred firing with
        its charm state."""
        if charm_id in self._charm_state_cache:
            return succeed(self._charm_state_cache[charm_id])

        bundle = charm.as_bundle()
        charm_file = open(bundle.path, "rb")
        charm_store_path = under.quote(
            "%s:%s" % (charm_id, bundle.get_sha256()))

        def close_charm_file(passthrough):
            # Runs on both success and failure so the file never leaks.
            charm_file.close()
            return passthrough

        def get_charm_url(result):
            return self._storage.get_url(charm_store_path)

        d = self._storage.put(charm_store_path, charm_file)
        d.addBoth(close_charm_file)
        d.addCallback(get_charm_url)
        d.addCallback(self._cb_store_charm_state, charm_id, bundle)
        d.addErrback(self._eb_verify_duplicate, charm_id, bundle)
        return d

    def publish(self):
        """Publish all added charms to provider storage and zookeeper.

        Returns the charm_state of all scheduled charms.
        """
        publish_deferreds = []
        for charm_id, charm in self._charm_add_queue:
            publish_deferreds.append(self._publish_one(charm_id, charm))

        publish_deferred = DeferredList(publish_deferreds,
                                        fireOnOneErrback=1,
                                        consumeErrors=1)
        # callbacks and deferreds to unwind the dlist
        publish_deferred.addCallback(self._cb_extract_charm_state)
        publish_deferred.addErrback(self._eb_extract_error)
        return publish_deferred

    def _cb_extract_charm_state(self, result):
        # DeferredList fires with (success, value) pairs; keep the values.
        return [r[1] for r in result]

    def _eb_extract_error(self, failure):
        # fireOnOneErrback wraps the underlying failure in a FirstError.
        failure.trap(FirstError)
        return failure.value.subFailure

    def _cb_store_charm_state(self, charm_url, charm_id, charm):
        return self._charm_state_manager.add_charm_state(
            charm_id, charm, charm_url)

    @inlineCallbacks
    def _eb_verify_duplicate(self, failure, charm_id, charm):
        """Detects duplicates vs. conflicts, raises stateerror on conflict."""
        failure.trap(NodeExistsException)

        try:
            charm_state = \
                yield self._charm_state_manager.get_charm_state(charm_id)
        except NoNodeException:
            # Check if the state goes away due to concurrent removal
            msg = "Charm removed concurrently during publish, please retry."
            raise StateChanged(msg)

        # Bug fix: a charm state's get_sha256() returns a deferred (it is
        # yielded elsewhere in this module), so it must be yielded here
        # too. Comparing the deferred object itself to the local bundle's
        # checksum is always unequal, which mis-reported every legitimate
        # duplicate publish as a conflict.
        existing_sha256 = yield charm_state.get_sha256()
        if existing_sha256 != charm.get_sha256():
            msg = "Concurrent upload of charm has different checksum %s" % (
                charm_id)
            raise StateChanged(msg)
class MachineAgent(BaseAgent):
    """A juju agent that supervises the service units placed on a machine.

    Whenever a unit is assigned to this machine the agent downloads the
    unit's charm, prepares a working directory for it, and launches a
    service unit agent inside that directory.  It also watches the
    ephemeral presence nodes of the running unit agents and restarts
    any that die.
    """

    name = "juju-machine-agent"
    unit_agent_module = "juju.agents.unit"

    @property
    def charms_directory(self):
        """Local cache directory for downloaded charm bundles."""
        return os.path.join(self.config["juju_directory"], "charms")

    @property
    def units_directory(self):
        """Directory containing one workspace per deployed unit."""
        return os.path.join(self.config["juju_directory"], "units")

    @property
    def unit_state_directory(self):
        """Directory containing persistent unit state."""
        return os.path.join(self.config["juju_directory"], "state")

    @inlineCallbacks
    def start(self):
        """Start the machine agent.

        Ensures the on-disk directory layout exists, resolves the
        machine's state node, registers the assigned-units watch, and
        announces the agent's presence.
        """
        # Make sure every working directory the agent needs exists.
        for directory in (self.charms_directory,
                          self.units_directory,
                          self.unit_state_directory):
            if not os.path.exists(directory):
                os.makedirs(directory)

        # State managers used for the lifetime of the agent.
        self.service_state_manager = ServiceStateManager(self.client)
        self.charm_state_manager = CharmStateManager(self.client)

        # Resolve the state node for the machine this agent represents.
        self.machine_state = yield MachineStateManager(
            self.client).get_machine_state(self.get_machine_id())

        # React to changes in the set of units assigned to this machine.
        if self.get_watch_enabled():
            self.machine_state.watch_assigned_units(
                self.watch_service_units)

        # Determine which provider backs this machine and pick the
        # matching unit deployment strategy.
        global_settings = GlobalSettingsStateManager(self.client)
        self.provider_type = yield global_settings.get_provider_type()
        self.deploy_factory = get_deploy_factory(self.provider_type)

        # Broadcast the agent's presence to the rest of the system.
        yield self.machine_state.connect_agent()
        log.info("Machine agent started id:%s deploy:%r provider:%r" % (
            self.get_machine_id(), self.deploy_factory, self.provider_type))

    def download_charm(self, charm_state):
        """Fetch a charm from provider storage into the local cache.

        Repeated requests for the same charm are served from the cache
        directory rather than downloaded again.
        """
        log.debug("Downloading charm %s to %s",
                  charm_state.id, self.charms_directory)
        return download_charm(
            self.client, charm_state.id, self.charms_directory)

    @inlineCallbacks
    def watch_service_units(self, old_units, new_units):
        """Callback run whenever this machine's assigned units change."""
        if old_units is None:
            old_units = set()

        log.debug(
            "Units changed old:%s new:%s", old_units, new_units)

        # Tear down units that are no longer assigned, then bring up the
        # newly assigned ones; failures are logged and do not abort the
        # remaining units.
        for unit_name in (old_units - new_units):
            log.debug("Stopping service unit: %s ...", unit_name)
            try:
                yield self.kill_service_unit(unit_name)
            except Exception:
                log.exception("Error stopping unit: %s", unit_name)

        for unit_name in (new_units - old_units):
            log.debug("Starting service unit: %s ...", unit_name)
            try:
                yield self.start_service_unit(unit_name)
            except Exception:
                log.exception("Error starting unit: %s", unit_name)

    @inlineCallbacks
    def start_service_unit(self, service_unit_name):
        """Deploy a single service unit onto this machine.

        Resolves the unit's charm, downloads it, and hands the bundle to
        a deployment which unpacks it and launches the unit agent.
        """
        # Map the unit back to the charm it runs.
        unit = yield self.service_state_manager.get_unit_state(
            service_unit_name)
        unit_charm_id = yield unit.get_charm_id()
        charm = yield self.charm_state_manager.get_charm_state(
            unit_charm_id)

        # Pull the charm bundle down from provider storage.
        charm_bundle = yield self.download_charm(charm)

        # Let the deployment prepare the workspace and run the unit
        # agent, unless one is already running for this unit.
        unit_deployment = self.deploy_factory(
            service_unit_name, self.config["juju_directory"])

        if not (yield unit_deployment.is_running()):
            log.debug("Starting service unit %s", service_unit_name)
            yield unit_deployment.start(
                self.get_machine_id(), self.client.servers, charm_bundle)
            log.info("Started service unit %s", service_unit_name)

    def kill_service_unit(self, service_unit_name):
        """Stop service unit and destroy disk state, ala SIGKILL or
        lxc-destroy
        """
        unit_deployment = self.deploy_factory(
            service_unit_name, self.config["juju_directory"])
        log.info("Stopping service unit %s...", service_unit_name)
        return unit_deployment.destroy()

    def get_machine_id(self):
        """Return this machine's id as known within the zk state."""
        return self.config["machine_id"]

    def get_agent_name(self):
        """Return the presence name used for this agent."""
        return "Machine:%s" % self.get_machine_id()

    def configure(self, options):
        """Validate agent options; a machine id is mandatory."""
        super(MachineAgent, self).configure(options)
        if not options.get("machine_id"):
            msg = ("--machine-id must be provided in the command line, "
                   "or $JUJU_MACHINE_ID in the environment")
            raise JujuError(msg)

    @classmethod
    def setup_options(cls, parser):
        """Add the --machine-id option, defaulting to $JUJU_MACHINE_ID."""
        super(MachineAgent, cls).setup_options(parser)
        machine_id = os.environ.get("JUJU_MACHINE_ID", "")
        parser.add_argument(
            "--machine-id", default=machine_id)
        return parser
def collect(scope, machine_provider, client, log):
    """Extract status information into nested dicts for rendering.

    `scope`: an optional list of name specifiers. Globbing based
    wildcards supported. Defaults to all units, services and
    relations.

    `machine_provider`: machine provider for the environment

    `client`: ZK client connection

    `log`: a Python stdlib logger.

    NOTE(review): this function yields deferreds and calls returnValue,
    so it presumably runs under an @inlineCallbacks-style decorator
    declared above this chunk -- confirm against the full file.
    """
    service_manager = ServiceStateManager(client)
    relation_manager = RelationStateManager(client)
    machine_manager = MachineStateManager(client)
    charm_manager = CharmStateManager(client)

    # Top-level result structure; service_data/machine_data are direct
    # references into `state`.
    service_data = {}
    machine_data = {}
    state = dict(services=service_data, machines=machine_data)
    seen_machines = set()
    filter_services, filter_units = digest_scope(scope)

    services = yield service_manager.get_all_service_states()
    for service in services:
        # Honor the service-name filter (glob patterns) when present.
        if len(filter_services):
            found = False
            for filter_service in filter_services:
                if fnmatch(service.service_name, filter_service):
                    found = True
                    break
            if not found:
                continue

        unit_data = {}
        relation_data = {}
        charm_id = yield service.get_charm_id()
        charm = yield charm_manager.get_charm_state(charm_id)
        service_data[service.service_name] = dict(units=unit_data,
                                                  charm=charm.id,
                                                  relations=relation_data)
        exposed = yield service.get_exposed_flag()
        if exposed:
            service_data[service.service_name].update(exposed=exposed)
        units = yield service.get_all_unit_states()
        unit_matched = False
        relations = yield relation_manager.get_relations_for_service(service)
        for unit in units:
            # Honor the unit-name filter (glob patterns) when present.
            if len(filter_units):
                found = False
                for filter_unit in filter_units:
                    if fnmatch(unit.unit_name, filter_unit):
                        found = True
                        break
                if not found:
                    continue
            u = unit_data[unit.unit_name] = dict()
            machine_id = yield unit.get_assigned_machine_id()
            u["machine"] = machine_id
            unit_workflow_client = WorkflowStateClient(client, unit)
            unit_state = yield unit_workflow_client.get_state()
            if not unit_state:
                # No workflow state yet: the unit agent has never run.
                u["state"] = "pending"
            else:
                # Report workflow state only while the agent is present.
                unit_connected = yield unit.has_agent()
                u["state"] = unit_state if unit_connected else "down"
            if exposed:
                open_ports = yield unit.get_open_ports()
                u["open-ports"] = ["{port}/{proto}".format(**port_info)
                                   for port_info in open_ports]

            u["public-address"] = yield unit.get_public_address()

            # indicate we should include information about this
            # machine later
            seen_machines.add(machine_id)
            unit_matched = True

            # collect info on each relation for the service unit
            relation_status = {}
            for relation in relations:
                try:
                    relation_unit = yield relation.get_unit_state(unit)
                except UnitRelationStateNotFound:
                    # This exception will occur when relations are
                    # established between services without service
                    # units, and therefore never have any
                    # corresponding service relation units. This
                    # scenario does not occur in actual deployments,
                    # but can happen in test circumstances. In
                    # particular, it will happen with a misconfigured
                    # provider, which exercises this codepath.
                    continue  # should not occur, but status should not fail
                relation_workflow_client = WorkflowStateClient(
                    client, relation_unit)
                relation_workflow_state = \
                    yield relation_workflow_client.get_state()
                relation_status[relation.relation_name] = dict(
                    state=relation_workflow_state)
            u["relations"] = relation_status

        # after filtering units check if any matched or remove the
        # service from the output
        if filter_units and not unit_matched:
            del service_data[service.service_name]
            continue

        for relation in relations:
            rel_services = yield relation.get_service_states()

            # A single related service implies a peer relation. More
            # imply a bi-directional provides/requires relationship.
            # In the later case we omit the local side of the relation
            # when reporting.
            if len(rel_services) > 1:
                # Filter out self from multi-service relations.
                rel_services = [
                    rsn for rsn in rel_services
                    if rsn.service_name != service.service_name]

            if len(rel_services) > 1:
                raise ValueError("Unexpected relationship with more "
                                 "than 2 endpoints")

            rel_service = rel_services[0]
            relation_data[relation.relation_name] = rel_service.service_name

    machines = yield machine_manager.get_all_machine_states()
    for machine_state in machines:
        # When filtering, only include machines hosting matched units.
        if (filter_services or filter_units) and \
                machine_state.id not in seen_machines:
            continue

        instance_id = yield machine_state.get_instance_id()
        m = {"instance-id": instance_id
             if instance_id is not None else "pending"}
        if instance_id is not None:
            try:
                pm = yield machine_provider.get_machine(instance_id)
                m["dns-name"] = pm.dns_name
                m["instance-state"] = pm.state
                if (yield machine_state.has_agent()):
                    # if the agent's connected, we're fine
                    m["state"] = "running"
                else:
                    units = (
                        yield machine_state.get_all_service_unit_states())
                    for unit in units:
                        unit_workflow_client = WorkflowStateClient(
                            client, unit)
                        if (yield unit_workflow_client.get_state()):
                            # for unit to have a state, its agent must have
                            # run, which implies the machine agent must have
                            # been running correctly at some point in the
                            # past
                            m["state"] = "down"
                            break
                    else:
                        # otherwise we're probably just still waiting
                        m["state"] = "not-started"
            except ProviderError:
                # The provider doesn't have machine information
                log.error(
                    "Machine provider information missing: machine %s" % (
                        machine_state.id))

        machine_data[machine_state.id] = m

    returnValue(state)
class MachineStateManagerTest(StateTestBase):
    """Tests for MachineStateManager's zookeeper-backed machine state."""

    @inlineCallbacks
    def setUp(self):
        yield super(MachineStateManagerTest, self).setUp()
        self.charm_state_manager = CharmStateManager(self.client)
        self.machine_state_manager = MachineStateManager(self.client)
        self.service_state_manager = ServiceStateManager(self.client)
        # A charm state shared by tests that need to create services.
        self.charm_state = yield self.charm_state_manager.add_charm_state(
            local_charm_id(self.charm), self.charm, "")

    @inlineCallbacks
    def add_service(self, service_name):
        # Helper: create a service backed by the shared charm state.
        service_state = yield self.service_state_manager.add_service_state(
            service_name, self.charm_state)
        returnValue(service_state)

    @inlineCallbacks
    def test_add_machine(self):
        """
        Adding a machine state should register it in zookeeper.
        """
        machine_state1 = yield self.machine_state_manager.add_machine_state()
        machine_state2 = yield self.machine_state_manager.add_machine_state()
        self.assertEquals(machine_state1.id, 0)
        self.assertEquals(machine_state1.internal_id, "machine-0000000000")
        self.assertEquals(machine_state2.id, 1)
        self.assertEquals(machine_state2.internal_id, "machine-0000000001")
        children = yield self.client.get_children("/machines")
        self.assertEquals(sorted(children),
                          ["machine-0000000000", "machine-0000000001"])
        topology = yield self.get_topology()
        self.assertTrue(topology.has_machine("machine-0000000000"))
        self.assertTrue(topology.has_machine("machine-0000000001"))

    @inlineCallbacks
    def test_machine_str_representation(self):
        """The str(machine) value includes the machine id.
        """
        machine_state1 = yield self.machine_state_manager.add_machine_state()
        self.assertEqual(
            str(machine_state1), "<MachineState id:machine-%010d>" % (0))

    @inlineCallbacks
    def test_remove_machine(self):
        """
        Adding a machine state should register it in zookeeper.
        """
        machine_state1 = yield self.machine_state_manager.add_machine_state()
        yield self.machine_state_manager.add_machine_state()
        removed = yield self.machine_state_manager.remove_machine_state(
            machine_state1.id)
        self.assertTrue(removed)
        children = yield self.client.get_children("/machines")
        self.assertEquals(sorted(children), ["machine-0000000001"])
        topology = yield self.get_topology()
        self.assertFalse(topology.has_machine("machine-0000000000"))
        self.assertTrue(topology.has_machine("machine-0000000001"))

        # Removing a non-existing machine again won't fail, since the end
        # intention is preserved. This makes dealing with concurrency easier.
        # However, False will be returned in this case.
        removed = yield self.machine_state_manager.remove_machine_state(
            machine_state1.id)
        self.assertFalse(removed)

    @inlineCallbacks
    def test_remove_machine_with_agent(self):
        """Removing a machine with a connected machine agent should succeed.

        The removal signals intent to remove a working machine (with an
        agent) with the provisioning agent to remove it subsequently.
        """
        # Add two machines.
        machine_state1 = yield self.machine_state_manager.add_machine_state()
        yield self.machine_state_manager.add_machine_state()

        # Connect an agent
        yield machine_state1.connect_agent()

        # Remove a machine
        removed = yield self.machine_state_manager.remove_machine_state(
            machine_state1.id)
        self.assertTrue(removed)

        # Verify the second one is still present
        children = yield self.client.get_children("/machines")
        self.assertEquals(sorted(children), ["machine-0000000001"])

        # Verify the topology state.
        topology = yield self.get_topology()
        self.assertFalse(topology.has_machine("machine-0000000000"))
        self.assertTrue(topology.has_machine("machine-0000000001"))

    @inlineCallbacks
    def test_get_machine_and_check_attributes(self):
        """
        Getting a machine state should be possible using both the
        user-oriented id and the internal id.
        """
        yield self.machine_state_manager.add_machine_state()
        yield self.machine_state_manager.add_machine_state()
        machine_state = yield self.machine_state_manager.get_machine_state(0)
        self.assertEquals(machine_state.id, 0)

        machine_state = yield self.machine_state_manager.get_machine_state(
            "0")
        self.assertEquals(machine_state.id, 0)

        yield self.assertFailure(
            self.machine_state_manager.get_machine_state("a"),
            MachineStateNotFound)

    @inlineCallbacks
    def test_get_machine_not_found(self):
        """
        Getting a machine state which is not available should errback
        a meaningful error.
        """
        # No state whatsoever.
        try:
            yield self.machine_state_manager.get_machine_state(0)
        except MachineStateNotFound, e:
            self.assertEquals(e.machine_id, 0)
        else:
class UnitDeployer(object):
    """Handles deployment of service units on behalf of an agent."""

    def __init__(self, client, machine_id, juju_directory):
        """Initialize a Unit Deployer.

        :param client: A connected zookeeper client.
        :param str machine_id: the ID of the machine the agent is being run
            on.
        :param str juju_directory: the directory the agent is running in.
        """
        self.client = client
        self.machine_id = machine_id
        self.juju_directory = juju_directory
        self.service_state_manager = ServiceStateManager(self.client)
        self.charm_state_manager = CharmStateManager(self.client)

    @property
    def charms_directory(self):
        """Path of the local charm cache directory."""
        return os.path.join(self.juju_directory, "charms")

    @inlineCallbacks
    def start(self, provider_type=None):
        """Starts the unit deployer.

        When `provider_type` is not supplied it is read from the global
        settings stored in zookeeper; the provider type selects the unit
        deployment strategy.
        """
        if provider_type is None:
            global_settings = GlobalSettingsStateManager(self.client)
            provider_type = yield global_settings.get_provider_type()
        self.deploy_factory = get_deploy_factory(provider_type)

        # The charm cache must exist before any charm can be downloaded.
        if not os.path.exists(self.charms_directory):
            os.makedirs(self.charms_directory)

    def download_charm(self, charm_state):
        """Retrieve a charm from the provider storage to the local machine.

        :param charm_state: Charm to be downloaded
        """
        log.debug("Downloading charm %s to %s",
                  charm_state.id, self.charms_directory)
        return download_charm(
            self.client, charm_state.id, self.charms_directory)

    @inlineCallbacks
    def start_service_unit(self, service_unit_name):
        """Start a service unit on the machine.

        Resolves the unit's charm, downloads it, and hands the bundle to
        a deployment which unpacks it and launches the unit agent.

        :param str service_unit_name: Service unit name to be started
        """
        # Map the unit back to the charm it runs.
        unit = yield self.service_state_manager.get_unit_state(
            service_unit_name)
        unit_charm_id = yield unit.get_charm_id()
        charm = yield self.charm_state_manager.get_charm_state(
            unit_charm_id)

        # Pull the charm bundle down from provider storage.
        charm_bundle = yield self.download_charm(charm)

        # Let the deployment prepare the workspace and run the unit
        # agent, unless one is already running for this unit.
        unit_deployment = self.deploy_factory(
            service_unit_name, self.juju_directory)
        log.debug("Using %r for %s in %s",
                  unit_deployment, service_unit_name, self.juju_directory)

        if not (yield unit_deployment.is_running()):
            log.debug("Starting service unit %s...", service_unit_name)
            yield unit_deployment.start(
                self.machine_id, self.client.servers, charm_bundle)
            log.info("Started service unit %s", service_unit_name)

    @inlineCallbacks
    def kill_service_unit(self, service_unit_name):
        """Stop service unit and destroy disk state, ala SIGKILL or
        lxc-destroy

        :param str service_unit_name: Service unit name to be killed
        """
        unit_deployment = self.deploy_factory(
            service_unit_name, self.juju_directory)
        log.info("Stopping service unit %s...", service_unit_name)
        yield unit_deployment.destroy()
        log.info("Stopped service unit %s", service_unit_name)