@inlineCallbacks
def add_relation(env_config, environment, verbose, log, *descriptors):
    """Add relation between relation endpoints described by `descriptors`"""
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    relation_state_manager = RelationStateManager(client)
    service_state_manager = ServiceStateManager(client)
    endpoint_pairs = yield service_state_manager.join_descriptors(
        *descriptors)

    if verbose:
        log.info("Endpoint pairs: %s", endpoint_pairs)

    if len(endpoint_pairs) == 0:
        raise NoMatchingEndpoints()
    elif len(endpoint_pairs) > 1:
        raise AmbiguousRelation(descriptors, endpoint_pairs)

    # At this point we just have one endpoint pair. We need to pick
    # just one of the endpoints if it's a peer endpoint, since that's
    # our current API - join_descriptors takes two descriptors, but
    # add_relation_state takes one or two endpoints. TODO: consider
    # refactoring.
    endpoints = endpoint_pairs[0]
    if endpoints[0] == endpoints[1]:
        endpoints = endpoints[0:1]
    yield relation_state_manager.add_relation_state(*endpoints)
    yield client.close()
    log.info("Added %s relation to all service units.",
             endpoints[0].relation_type)
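A minimal driver sketch for add_relation() above, assuming `env_config` and `environment` are already-loaded juju environment objects; the service names and the relation name are illustrative, not taken from this codebase:

import logging

from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def relate_wordpress_to_mysql(env_config, environment):
    log = logging.getLogger("juju.control")
    # Descriptors may be bare service names ("mysql") or qualified
    # "service:relation" forms ("wordpress:db"); join_descriptors
    # resolves them to compatible endpoint pairs.
    yield add_relation(env_config, environment, True, log,
                       "wordpress:db", "mysql")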
@inlineCallbacks
def _load_relations(self):
    """Recreate workflows for any relation we had previously stored.

    All relations (including those already departed) are stored in
    ._relations (and will be added or departed as usual); but only
    relations *not* already departed will be synchronized, to avoid
    errors caused by trying to access ZK state that may no longer
    exist.
    """
    self._relations = {}
    if not os.path.exists(self._known_relations_path):
        return

    rsm = RelationStateManager(self._client)
    relations = yield rsm.get_relations_for_service(self._service)
    relations_by_id = dict(
        (r.internal_relation_id, r) for r in relations)

    with open(self._known_relations_path) as f:
        known_relations = yaml.load(f.read())

    for relation_id, relation_info in known_relations.items():
        if relation_id in relations_by_id:
            # The service relation is still around: set up the
            # workflow as usual.
            yield self._add_relation(relations_by_id[relation_id])
        else:
            # The relation has departed. Create an *un*synchronized
            # workflow and place it in relations for detection and
            # removal (with hook-firing) in _process_service_changes.
            workflow = self._reconstruct_workflow(
                relation_id,
                relation_info["relation_name"],
                relation_info["relation_scope"])
            self._relations[relation_id] = workflow
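For reference, the file read above is a YAML mapping from internal relation id to the two keys the loop consumes. A sketch of writing one such file; the relation ids and names here are made up, not real juju state:

import yaml

known_relations = {
    "relation-0000000001": {"relation_name": "db",
                            "relation_scope": "global"},
    "relation-0000000002": {"relation_name": "juju-info",
                            "relation_scope": "container"},
}
with open("known_relations.yaml", "w") as f:
    f.write(yaml.safe_dump(known_relations))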
@inlineCallbacks
def destroy_service(config, environment, verbose, log, service_name):
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    service_manager = ServiceStateManager(client)
    service_state = yield service_manager.get_service_state(service_name)

    if (yield service_state.is_subordinate()):
        # We can destroy the service if it does not have relations.
        # That implies that its principals have already been torn
        # down (or were never added).
        relation_manager = RelationStateManager(client)
        relations = yield relation_manager.get_relations_for_service(
            service_state)
        if relations:
            principal_service = None
            # A container relation to a non-subordinate service means
            # a principal still depends on this subordinate (revisit
            # in the future).
            for relation in relations:
                if relation.relation_scope != "container":
                    continue
                services = yield relation.get_service_states()
                remote_service = [
                    s for s in services
                    if s.service_name != service_state.service_name][0]
                if not (yield remote_service.is_subordinate()):
                    principal_service = remote_service
                    break
            if principal_service:
                raise UnsupportedSubordinateServiceRemoval(
                    service_state.service_name,
                    principal_service.service_name)

    yield service_manager.remove_service_state(service_state)
    log.info("Service %r destroyed.", service_state.service_name)
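A sketch of how a caller might surface the subordinate guard above as a user-facing error rather than a traceback; `config` and `environment` are assumed loaded as in the other snippets, and the helper name is hypothetical:

import logging

from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def try_destroy(config, environment, service_name):
    log = logging.getLogger("juju.control")
    try:
        yield destroy_service(config, environment, False, log, service_name)
    except UnsupportedSubordinateServiceRemoval as e:
        # A subordinate bound to a live principal via a container-scoped
        # relation cannot be destroyed; remove the relation first.
        log.error(str(e))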
@inlineCallbacks
def remove_relation(env_config, environment, verbose, log, *descriptors):
    """Remove relation between relation endpoints described by `descriptors`"""
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    relation_state_manager = RelationStateManager(client)
    service_state_manager = ServiceStateManager(client)
    endpoint_pairs = yield service_state_manager.join_descriptors(
        *descriptors)

    if verbose:
        log.info("Endpoint pairs: %s", endpoint_pairs)

    if len(endpoint_pairs) == 0:
        raise NoMatchingEndpoints()
    elif len(endpoint_pairs) > 1:
        raise AmbiguousRelation(descriptors, endpoint_pairs)

    # At this point we just have one endpoint pair. We need to pick
    # just one of the endpoints if it's a peer endpoint, since that's
    # our current API - join_descriptors takes two descriptors, but
    # add_relation_state takes one or two endpoints. TODO: consider
    # refactoring.
    endpoints = endpoint_pairs[0]
    if endpoints[0] == endpoints[1]:
        endpoints = endpoints[0:1]
    relation_state = yield relation_state_manager.get_relation_state(
        *endpoints)

    # Look at both endpoints: if we are dealing with a container
    # relation, decide whether one end is a principal.
    service_pair = []  # ordered such that principal, subordinate
    is_container = False
    has_principal = False
    for ep in endpoints:
        if ep.relation_scope == "container":
            is_container = True
        service = yield service_state_manager.get_service_state(
            ep.service_name)
        if (yield service.is_subordinate()):
            service_pair.append(service)
        else:
            service_pair.insert(0, service)
            has_principal = True

    if is_container and len(service_pair) == 2 and has_principal:
        principal, sub = service_pair
        raise UnsupportedSubordinateServiceRemoval(
            sub.service_name, principal.service_name)

    yield relation_state_manager.remove_relation_state(relation_state)
    yield client.close()
    log.info("Removed %s relation from all service units.",
             endpoints[0].relation_type)
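Peer relations resolve both descriptors to the same endpoint, which the `endpoints[0:1]` slice above collapses to one, so a single descriptor suffices. A hypothetical call, with "riak" standing in for any peer-related service:

import logging

from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def unrelate_riak_ring(env_config, environment):
    # Removes riak's peer relation ("ring") with a single descriptor.
    yield remove_relation(env_config, environment, False,
                          logging.getLogger("juju.control"), "riak")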
@inlineCallbacks
def test_deploy_adds_peer_relations(self):
    """Deploy automatically adds peer relations."""
    environment = self.config.get("firstenv")
    yield deploy.deploy(
        self.config, environment, self.unbundled_repo_path, "local:riak",
        None, logging.getLogger("deploy"))
    service_manager = ServiceStateManager(self.client)
    service_state = yield service_manager.get_service_state("riak")
    relation_manager = RelationStateManager(self.client)
    relations = yield relation_manager.get_relations_for_service(
        service_state)
    self.assertEqual(len(relations), 1)
    self.assertEqual(relations[0].relation_name, "ring")
@inlineCallbacks
def collect(scope, machine_provider, client, log):
    """Extract status information into nested dicts for rendering.

    `scope`: an optional list of name specifiers. Globbing-based
        wildcards are supported. Defaults to all units, services and
        relations.
    `machine_provider`: machine provider for the environment
    `client`: ZK client connection
    `log`: a Python stdlib logger
    """
    service_manager = ServiceStateManager(client)
    relation_manager = RelationStateManager(client)
    machine_manager = MachineStateManager(client)
    charm_manager = CharmStateManager(client)

    service_data = {}
    machine_data = {}
    state = dict(services=service_data, machines=machine_data)
    seen_machines = set()
    filter_services, filter_units = digest_scope(scope)

    services = yield service_manager.get_all_service_states()
    for service in services:
        if len(filter_services):
            found = False
            for filter_service in filter_services:
                if fnmatch(service.service_name, filter_service):
                    found = True
                    break
            if not found:
                continue

        unit_data = {}
        relation_data = {}
        charm_id = yield service.get_charm_id()
        charm = yield charm_manager.get_charm_state(charm_id)
        service_data[service.service_name] = dict(
            units=unit_data, charm=charm.id, relations=relation_data)
        exposed = yield service.get_exposed_flag()
        if exposed:
            service_data[service.service_name].update(exposed=exposed)

        units = yield service.get_all_unit_states()
        unit_matched = False
        relations = yield relation_manager.get_relations_for_service(service)

        for unit in units:
            if len(filter_units):
                found = False
                for filter_unit in filter_units:
                    if fnmatch(unit.unit_name, filter_unit):
                        found = True
                        break
                if not found:
                    continue

            u = unit_data[unit.unit_name] = dict()
            machine_id = yield unit.get_assigned_machine_id()
            u["machine"] = machine_id
            unit_workflow_client = WorkflowStateClient(client, unit)
            unit_state = yield unit_workflow_client.get_state()
            if not unit_state:
                u["state"] = "pending"
            else:
                unit_connected = yield unit.has_agent()
                u["state"] = unit_state if unit_connected else "down"
            if exposed:
                open_ports = yield unit.get_open_ports()
                u["open-ports"] = ["{port}/{proto}".format(**port_info)
                                   for port_info in open_ports]

            u["public-address"] = yield unit.get_public_address()

            # Indicate that we should include information about this
            # machine later.
            seen_machines.add(machine_id)
            unit_matched = True

            # Collect info on each relation for the service unit.
            relation_status = {}
            for relation in relations:
                try:
                    relation_unit = yield relation.get_unit_state(unit)
                except UnitRelationStateNotFound:
                    # This exception will occur when relations are
                    # established between services without service
                    # units, and therefore never have any
                    # corresponding service relation units. This
                    # scenario does not occur in actual deployments,
                    # but can happen in test circumstances. In
                    # particular, it will happen with a misconfigured
                    # provider, which exercises this codepath.
                    continue  # should not occur, but status should not fail
                relation_workflow_client = WorkflowStateClient(
                    client, relation_unit)
                relation_workflow_state = \
                    yield relation_workflow_client.get_state()
                relation_status[relation.relation_name] = dict(
                    state=relation_workflow_state)
            u["relations"] = relation_status

        # After filtering units, check whether any matched; if not,
        # remove the service from the output.
        if filter_units and not unit_matched:
            del service_data[service.service_name]
            continue

        for relation in relations:
            rel_services = yield relation.get_service_states()

            # A single related service implies a peer relation. More
            # imply a bi-directional provides/requires relationship.
            # In the latter case we omit the local side of the
            # relation when reporting.
            if len(rel_services) > 1:
                # Filter out self from multi-service relations.
                rel_services = [
                    rsn for rsn in rel_services
                    if rsn.service_name != service.service_name]

            if len(rel_services) > 1:
                raise ValueError("Unexpected relationship with more "
                                 "than 2 endpoints")

            rel_service = rel_services[0]
            relation_data[relation.relation_name] = rel_service.service_name

    machines = yield machine_manager.get_all_machine_states()
    for machine_state in machines:
        if ((filter_services or filter_units) and
                machine_state.id not in seen_machines):
            continue

        instance_id = yield machine_state.get_instance_id()
        m = {"instance-id": instance_id
             if instance_id is not None else "pending"}
        if instance_id is not None:
            try:
                pm = yield machine_provider.get_machine(instance_id)
                m["dns-name"] = pm.dns_name
                m["instance-state"] = pm.state
                if (yield machine_state.has_agent()):
                    # If the agent's connected, we're fine.
                    m["state"] = "running"
                else:
                    units = (
                        yield machine_state.get_all_service_unit_states())
                    for unit in units:
                        unit_workflow_client = WorkflowStateClient(
                            client, unit)
                        if (yield unit_workflow_client.get_state()):
                            # For a unit to have a state, its agent must
                            # have run, which implies the machine agent
                            # must have been running correctly at some
                            # point in the past.
                            m["state"] = "down"
                            break
                    else:
                        # Otherwise we're probably just still waiting.
                        m["state"] = "not-started"
            except ProviderError:
                # The provider doesn't have machine information.
                log.error(
                    "Machine provider information missing: machine %s" % (
                        machine_state.id))

        machine_data[machine_state.id] = m

    returnValue(state)
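The nested dict that collect() returns has roughly the following shape. The keys come from the assignments above, while the concrete values here are invented for illustration:

example_state = {
    "services": {
        "wordpress": {
            "charm": "local:oneiric/wordpress-3",
            "exposed": True,
            "relations": {"db": "mysql"},
            "units": {
                "wordpress/0": {
                    "machine": 1,
                    "state": "started",
                    "open-ports": ["80/tcp"],
                    "public-address": "ec2-1-2-3-4.example.com",
                    "relations": {"db": {"state": "up"}},
                },
            },
        },
    },
    "machines": {
        1: {"instance-id": "i-0abc123",
            "dns-name": "ec2-1-2-3-4.example.com",
            "instance-state": "running",
            "state": "running"},
    },
}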
@inlineCallbacks
def deploy(env_config, environment, repository_path, charm_name,
           service_name, log, config_file=None, num_units=1):
    """Deploy a charm within an environment.

    This will publish the charm to the environment, create a service
    from the charm, and set it to be launched on a new machine.
    """
    repo, charm_url = resolve(
        charm_name, repository_path, environment.default_series)

    # Validate config options prior to deployment attempt
    service_options = {}
    service_name = service_name or charm_url.name
    if config_file:
        service_options = parse_config_options(config_file, service_name)

    charm = yield repo.find(charm_url)
    charm_id = str(charm_url.with_revision(charm.get_revision()))

    provider = environment.get_machine_provider()
    placement_policy = provider.get_placement_policy()
    client = yield provider.connect()
    try:
        storage = yield provider.get_file_storage()
        service_manager = ServiceStateManager(client)
        environment_state_manager = EnvironmentStateManager(client)
        yield environment_state_manager.set_config_state(
            env_config, environment.name)

        # Publish the charm to juju
        publisher = CharmPublisher(client, storage)
        yield publisher.add_charm(charm_id, charm)
        result = yield publisher.publish()

        # In the future we might have multiple charms published at
        # the same time. For now, extract the charm_state from the
        # list.
        charm_state = result[0]

        # Create the service state
        service_state = yield service_manager.add_service_state(
            service_name, charm_state)

        # Use the charm's ConfigOptions instance to validate service
        # options. Invalid options will raise an exception and
        # prevent the deploy.
        state = yield service_state.get_config()
        charm_config = yield charm_state.get_config()
        # Return the validated options with the defaults included.
        service_options = charm_config.validate(service_options)
        state.update(service_options)
        yield state.write()

        # Create the desired number of service units
        for i in xrange(num_units):
            unit_state = yield service_state.add_unit_state()
            yield place_unit(client, placement_policy, unit_state)

        # Check if we have any peer relations to establish
        if charm.metadata.peers:
            relation_manager = RelationStateManager(client)
            for peer_name, peer_info in charm.metadata.peers.items():
                yield relation_manager.add_relation_state(
                    RelationEndpoint(service_name, peer_info["interface"],
                                     peer_name, "peer"))

        log.info("Charm deployed as service: %r", service_name)
    finally:
        yield client.close()
@inlineCallbacks
def deploy(env_config, environment, repository_path, charm_name,
           service_name, log, constraint_strs, config_file=None,
           upgrade=False, num_units=1):
    """Deploy a charm within an environment.

    This will publish the charm to the environment, create a service
    from the charm, and set it to be launched on a new machine. If
    --repository is not specified, it will be taken from the
    environment variable JUJU_REPOSITORY.
    """
    repo, charm_url = resolve(
        charm_name, repository_path, environment.default_series)

    log.info("Searching for charm %s in %s" % (charm_url, repo))
    charm = yield repo.find(charm_url)
    if upgrade:
        if repo.type != "local" or charm.type != "dir":
            raise CharmError(
                charm.path,
                "Only local directory charms can be upgraded on deploy")
        charm.set_revision(charm.get_revision() + 1)
    charm_id = str(charm_url.with_revision(charm.get_revision()))

    # Validate config options prior to deployment attempt
    service_options = {}
    service_name = service_name or charm_url.name
    if config_file:
        service_options = parse_config_options(
            config_file, service_name, charm)

    provider = environment.get_machine_provider()
    placement_policy = provider.get_placement_policy()
    constraint_set = yield provider.get_constraint_set()
    constraints = constraint_set.parse(constraint_strs)
    client = yield provider.connect()
    try:
        yield legacy.check_constraints(client, constraint_strs)
        yield legacy.check_environment(
            client, provider.get_legacy_config_keys())
        yield sync_environment_state(client, env_config, environment.name)

        # Publish the charm to juju
        storage = yield provider.get_file_storage()
        publisher = CharmPublisher(client, storage)
        yield publisher.add_charm(charm_id, charm)
        result = yield publisher.publish()

        # In the future we might have multiple charms published at
        # the same time. For now, extract the charm_state from the
        # list.
        charm_state = result[0]

        # Create the service state
        service_manager = ServiceStateManager(client)
        service_state = yield service_manager.add_service_state(
            service_name, charm_state, constraints)

        # Use the charm's ConfigOptions instance to validate service
        # options. Invalid options will raise an exception and
        # prevent the deploy.
        state = yield service_state.get_config()
        charm_config = yield charm_state.get_config()
        # Return the validated options with the defaults included.
        service_options = charm_config.validate(service_options)
        state.update(service_options)
        yield state.write()

        # Create the desired number of service units
        if (yield service_state.is_subordinate()):
            log.info("Subordinate %r awaiting relationship "
                     "to principal for deployment.", service_name)
        else:
            for i in xrange(num_units):
                unit_state = yield service_state.add_unit_state()
                yield place_unit(client, placement_policy, unit_state)

        # Check if we have any peer relations to establish
        if charm.metadata.peers:
            relation_manager = RelationStateManager(client)
            for peer_name, peer_info in charm.metadata.peers.items():
                yield relation_manager.add_relation_state(
                    RelationEndpoint(service_name, peer_info["interface"],
                                     peer_name, "peer"))

        log.info("Charm deployed as service: %r", service_name)
    finally:
        yield client.close()
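A hypothetical invocation of the second deploy() variant above; the environment objects are assumed loaded, the constraint strings follow juju's "key=value" form, and all names and paths are illustrative:

import logging

from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def deploy_wordpress(env_config, environment):
    yield deploy(
        env_config, environment,
        repository_path="/home/user/charms",
        charm_name="local:oneiric/wordpress",
        service_name=None,  # defaults to the charm name
        log=logging.getLogger("juju.control"),
        constraint_strs=["cpu=2", "mem=2G"],
        num_units=2)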
@inlineCallbacks
def resolved(config, environment, verbose, log, unit_name, relation_name,
             retry):
    """Mark an error as resolved in a unit or unit relation.

    If one of a unit's charm non-relation hooks returns a non-zero exit
    status, the entire unit can be considered to be in a non-running
    state. As a resolution, the unit can be manually returned to a
    running state via the juju resolved command. Optionally this
    command can also rerun the failed hook.

    This resolution also applies separately to each of the unit's
    relations. If one of the relation hooks failed, there is no notion
    of retrying (the change is gone), but resolving will allow
    additional relation hooks for that relation to proceed.
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    service_manager = ServiceStateManager(client)
    relation_manager = RelationStateManager(client)

    unit_state = yield service_manager.get_unit_state(unit_name)
    service_state = yield service_manager.get_service_state(
        unit_name.split("/")[0])
    retry = retry and RETRY_HOOKS or NO_HOOKS

    if not relation_name:
        running, workflow_state = yield is_unit_running(client, unit_state)
        if running:
            log.info("Unit %r already running: %s", unit_name,
                     workflow_state)
            client.close()
            returnValue(False)

        yield unit_state.set_resolved(retry)
        log.info("Marked unit %r as resolved", unit_name)
        returnValue(True)

    # Check for the matching relations
    service_relations = yield relation_manager.get_relations_for_service(
        service_state)
    service_relations = [
        sr for sr in service_relations
        if sr.relation_name == relation_name]
    if not service_relations:
        raise RelationStateNotFound()

    # Verify the relations are in need of resolution.
    resolved_relations = {}
    for service_relation in service_relations:
        unit_relation = yield service_relation.get_unit_state(unit_state)
        running, state = yield is_relation_running(client, unit_relation)
        if not running:
            resolved_relations[unit_relation.internal_relation_id] = retry

    if not resolved_relations:
        log.warning("Matched relations are all running")
        client.close()
        returnValue(False)

    # Mark the relations as resolved.
    yield unit_state.set_relation_resolved(resolved_relations)
    log.info(
        "Marked unit %r relation %r as resolved", unit_name, relation_name)
    client.close()
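Sketches of both resolution modes supported above; the unit and relation names are illustrative:

import logging

from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def resolve_examples(config, environment):
    log = logging.getLogger("juju.control")
    # Unit-level resolution, retrying the failed hook:
    yield resolved(config, environment, False, log, "wordpress/0",
                   None, True)
    # Relation-level resolution; per the docstring above, retrying only
    # gates whether further relation hooks are allowed to proceed:
    yield resolved(config, environment, False, log, "wordpress/0",
                   "db", False)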