class Controller:

    """ Controller for Juju deployments and Maas machine init """

    def __init__(self, ui, config, loop):
        self.ui = ui
        self.ui.controller = self
        self.config = config
        self.loop = loop
        self.juju_state = None
        self.juju = None
        self.maas = None
        self.maas_state = None
        self.nodes = []
        self.juju_m_idmap = None  # for single, {instance_id: machine id}
        self.deployed_charm_classes = []
        self.placement_controller = None
        self.config.setopt('current_state',
                           ControllerState.INSTALL_WAIT.value)

    def update(self, *args, **kwargs):
        """Render UI according to current state and reset timer

        PegasusGUI only.
        """
        interval = 1
        current_state = self.config.getopt('current_state')
        if current_state == ControllerState.PLACEMENT:
            self.ui.render_placement_view(self.loop, self.config,
                                          self.commit_placement)
        elif current_state == ControllerState.INSTALL_WAIT:
            self.ui.render_node_install_wait(message="Waiting...")
            interval = self.config.node_install_wait_interval
        elif current_state == ControllerState.ADD_SERVICES:
            self.ui.render_add_services_dialog(self.deploy_new_services,
                                               self.cancel_add_services)
        elif current_state == ControllerState.SERVICES:
            self.update_node_states()
        else:
            raise Exception("Internal error, unexpected display "
                            "state '{}'".format(current_state))
        self.loop.redraw_screen()
        self.loop.set_alarm_in(interval, self.update)

    def update_node_states(self):
        """ Updating node states

        PegasusGUI only
        """
        if not self.juju_state:
            return
        deployed_services = sorted(self.juju_state.services,
                                   key=attrgetter('service_name'))
        deployed_service_names = [s.service_name for s in deployed_services]

        charm_classes = sorted(
            [m.__charm_class__ for m in
             utils.load_charms(self.config.getopt('charm_plugin_dir'))
             if m.__charm_class__.charm_name in deployed_service_names],
            key=attrgetter('charm_name'))

        self.nodes = list(zip(charm_classes, deployed_services))

        for n in deployed_services:
            for u in n.units:
                if u.is_horizon and u.agent_state == "started":
                    self.ui.set_dashboard_url(
                        u.public_address, 'ubuntu',
                        self.config.getopt('openstack_password'))
                if u.is_jujugui and u.agent_state == "started":
                    self.ui.set_jujugui_url(u.public_address)
        if len(self.nodes) == 0:
            return
        else:
            self.ui.render_services_view(self.nodes, self.juju_state,
                                         self.maas_state, self.config)

    def authenticate_juju(self):
        if not len(self.config.juju_env['state-servers']) > 0:
            state_server = 'localhost:17070'
        else:
            state_server = self.config.juju_env['state-servers'][0]
        self.juju = JujuClient(
            url=path.join('wss://', state_server),
            password=self.config.juju_api_password)
        self.juju.login()
        self.juju_state = JujuState(self.juju)
        log.debug('Authenticated against juju api.')

    def initialize(self):
        """Authenticates against juju/maas and sets up placement
        controller."""
        if getenv("FAKE_API_DATA"):
            self.juju_state = FakeJujuState()
            self.maas_state = FakeMaasState()
        else:
            self.authenticate_juju()
            if self.config.is_multi():
                creds = self.config.getopt('maascreds')
                self.maas, self.maas_state = connect_to_maas(creds)
        self.placement_controller = PlacementController(
            self.maas_state, self.config)

        if path.exists(self.config.placements_filename):
            with open(self.config.placements_filename, 'r') as pf:
                self.placement_controller.load(pf)
            self.ui.status_info_message("Loaded placements from file.")
            log.info("Loaded placements from "
                     "'{}'".format(self.config.placements_filename))

            # If we have no machines (so we are a fresh install) but
            # are reading a placements.yaml from a previous install,
            # so it has no assignments, only deployments, tell the
            # controller to use the deployments in the file as
            # assignments:
            if len(self.placement_controller.machines_pending()) == 0 and \
               len(self.juju_state.machines()) == 0:
                self.placement_controller.set_assignments_from_deployments()
                log.info("Using deployments saved from previous install"
                         " as new assignments.")
        else:
            if self.config.is_multi():
                def_assignments = self.placement_controller.gen_defaults()
            else:
                def_assignments = self.placement_controller.gen_single()

            self.placement_controller.set_all_assignments(def_assignments)

        pfn = self.config.placements_filename
        self.placement_controller.set_autosave_filename(pfn)
        self.placement_controller.do_autosave()

        if self.config.is_single():
            if self.config.getopt('headless'):
                self.begin_deployment()
            else:
                self.begin_deployment_async()
            return

        if self.config.getopt('edit_placement') or \
           not self.placement_controller.can_deploy():
            self.config.setopt(
                'current_state', ControllerState.PLACEMENT.value)
        else:
            if self.config.getopt('headless'):
                self.begin_deployment()
            else:
                self.begin_deployment_async()

    @utils.async
    def wait_for_maas_async(self):
        """ explicit async method """
        self.wait_for_maas()

    def wait_for_maas(self):
        """ install and configure maas """
        random_status = ["Packages are being installed to a MAAS container.",
                         "There's a few packages, it'll take just a minute",
                         "Checkout http://maas.ubuntu.com/ while you wait."]
        is_connected = False
        count = 0
        while not is_connected:
            self.ui.render_node_install_wait(message="Waiting...")
            self.ui.status_info_message(
                random_status[random.randrange(len(random_status))])
            count = count + 1
            self.ui.status_info_message(
                "Waiting for MAAS (tries {0})".format(count))
            uri = path.join('http://', utils.container_ip('maas'), 'MAAS')
            log.debug("Checking MAAS availability ({0})".format(uri))
            try:
                res = requests.get(uri)
                is_connected = res.ok
            except:
                self.ui.status_info_message("Waiting for MAAS to be installed")
                time.sleep(10)

        # Render nodeview, even though nothing is there yet.
        self.initialize()

    def commit_placement(self):
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()
        if self.config.getopt('headless'):
            self.begin_deployment()
        else:
            self.begin_deployment_async()

    @utils.async
    def begin_deployment_async(self):
        """ async deployment """
        self.begin_deployment()

    def begin_deployment(self):
        if self.config.is_multi():
            # now all machines are added
            self.maas.tag_fpi(self.maas.nodes)
            self.maas.nodes_accept_all()
            self.maas.tag_name(self.maas.nodes)

            while not self.all_maas_machines_ready():
                time.sleep(3)

            self.add_machines_to_juju_multi()

        elif self.config.is_single():
            self.add_machines_to_juju_single()

        # Quiet out some of the logging
        _previous_summary = None
        while not self.all_juju_machines_started():
            sd = self.juju_state.machines_summary()
            summary = ", ".join(["{} {}".format(v, k) for k, v
                                 in sd.items()])
            if summary != _previous_summary:
                self.ui.status_info_message("Waiting for machines to "
                                            "start: {}".format(summary))
                _previous_summary = summary
            time.sleep(1)

        if len(self.juju_state.machines()) == 0:
            raise Exception("Expected some juju machines started.")

        self.config.setopt('current_state', ControllerState.SERVICES.value)

        if self.config.is_single():
            controller_machine = self.juju_m_idmap['controller']
            self.configure_lxc_network(controller_machine)

        for juju_machine_id in self.juju_m_idmap.values():
            self.run_apt_go_fast(juju_machine_id)

        if self.config.is_single():
            self.set_unique_hostnames()

        self.deploy_using_placement()
        self.wait_for_deployed_services_ready()
        self.enqueue_deployed_charms()

    def set_unique_hostnames(self):
        """checks for and ensures unique hostnames, so e.g. ceph
        can assume that.

        FIXME: Remove once http://pad.lv/1326091 is fixed
        """
        count = 0
        for machine in self.juju_state.machines():
            count += 1
            hostname = machine.machine.get('InstanceId',
                                           "ubuntu-{}".format(count))
            log.debug("Setting hostname of {} to {}".format(machine,
                                                            hostname))
            juju_home = self.config.juju_home(use_expansion=True)
            utils.remote_run(
                machine.machine_id,
                cmds="echo {} | sudo tee /etc/hostname".format(hostname),
                juju_home=juju_home)
            utils.remote_run(
                machine.machine_id,
                cmds="sudo hostname {}".format(hostname),
                juju_home=juju_home)

    def all_maas_machines_ready(self):
        self.maas_state.invalidate_nodes_cache()

        needed = set([m.instance_id for m in
                      self.placement_controller.machines_pending()])
        ready = set([m.instance_id for m in
                     self.maas_state.machines(MaasMachineStatus.READY)])
        allocated = set([m.instance_id for m in
                         self.maas_state.machines(MaasMachineStatus.ALLOCATED)
                         ])

        summary = ", ".join(["{} {}".format(v, k) for k, v in
                             self.maas_state.machines_summary().items()])
        self.ui.status_info_message("Waiting for {} maas machines to be ready."
                                    " Machines Summary: {}".format(len(needed),
                                                                   summary))
        if not needed.issubset(ready.union(allocated)):
            return False
        return True

    def add_machines_to_juju_multi(self):
        """Adds each of the machines used for the placement to juju, if it
        isn't already there."""
        self.juju_state.invalidate_status_cache()
        juju_ids = [jm.instance_id for jm in self.juju_state.machines()]

        machine_params = []
        for maas_machine in self.placement_controller.machines_pending():
            if maas_machine.instance_id in juju_ids:
                # ignore machines that are already added to juju
                continue
            cd = dict(tags=[maas_machine.system_id])
            mp = dict(Series="", ContainerType="", ParentId="",
                      Constraints=cd, Jobs=[JujuJobs.HostUnits])
            machine_params.append(mp)

        if len(machine_params) > 0:
            import pprint
            log.debug("calling add_machines with params:"
                      " {}".format(pprint.pformat(machine_params)))
            rv = self.juju.add_machines(machine_params)
            log.debug("add_machines returned '{}'".format(rv))

    def all_juju_machines_started(self):
        self.juju_state.invalidate_status_cache()
        n_needed = len(self.placement_controller.machines_pending())
        n_allocated = len([jm for jm in self.juju_state.machines()
                           if jm.agent_state == 'started'])
        return n_allocated >= n_needed

    def add_machines_to_juju_single(self):
        self.juju_state.invalidate_status_cache()
        self.juju_m_idmap = {}
        for jm in self.juju_state.machines():
            response = self.juju.get_annotations(jm.machine_id, 'machine')
            ann = response['Annotations']
            if 'instance_id' in ann:
                self.juju_m_idmap[ann['instance_id']] = jm.machine_id

        log.debug("existing juju machines: {}".format(self.juju_m_idmap))

        def get_created_machine_id(iid, response):
            d = response['Machines'][0]
            if d['Error']:
                raise Exception("Error adding machine '{}':"
                                "{}".format(iid, response))
            else:
                return d['Machine']

        for machine in self.placement_controller.machines_pending():
            if machine.instance_id in self.juju_m_idmap:
                machine.machine_id = self.juju_m_idmap[machine.instance_id]
                log.debug("machine instance_id {} already exists as #{}, "
                          "skipping".format(machine.instance_id,
                                            machine.machine_id))
                continue
            log.debug("adding machine with "
                      "constraints={}".format(machine.constraints))
            rv = self.juju.add_machine(constraints=machine.constraints)
            m_id = get_created_machine_id(machine.instance_id, rv)
            machine.machine_id = m_id
            rv = self.juju.set_annotations(m_id, 'machine',
                                           {'instance_id':
                                            machine.instance_id})
            self.juju_m_idmap[machine.instance_id] = m_id

    def run_apt_go_fast(self, machine_id):
        utils.remote_cp(machine_id,
                        src=path.join(self.config.share_path,
                                      "tools/apt-go-fast"),
                        dst="/tmp/apt-go-fast",
                        juju_home=self.config.juju_home(use_expansion=True))
        utils.remote_run(machine_id,
                         cmds="sudo sh /tmp/apt-go-fast",
                         juju_home=self.config.juju_home(use_expansion=True))

    def configure_lxc_network(self, machine_id):
        # upload our lxc-host-only template and setup bridge
        log.info('Copying network specifications to machine')
        srcpath = path.join(self.config.tmpl_path, 'lxc-host-only')
        destpath = "/tmp/lxc-host-only"
        utils.remote_cp(machine_id, src=srcpath, dst=destpath,
                        juju_home=self.config.juju_home(use_expansion=True))

        log.debug('Updating network configuration for machine')
        utils.remote_run(machine_id,
                         cmds="sudo chmod +x /tmp/lxc-host-only",
                         juju_home=self.config.juju_home(use_expansion=True))
        utils.remote_run(machine_id,
                         cmds="sudo /tmp/lxc-host-only",
                         juju_home=self.config.juju_home(use_expansion=True))

    def deploy_using_placement(self):
        """Deploy charms using machine placement from placement controller,
        waiting for any deferred charms.  Then enqueue all charms for
        further processing and return.
        """
        self.ui.status_info_message("Verifying service deployments")
        assigned_ccs = self.placement_controller.assigned_charm_classes()
        charm_classes = sorted(assigned_ccs,
                               key=attrgetter('deploy_priority'))

        def undeployed_charm_classes():
            return [c for c in charm_classes
                    if c not in self.deployed_charm_classes]

        def update_pending_display():
            pending_names = [c.display_name
                             for c in undeployed_charm_classes()]
            self.ui.set_pending_deploys(pending_names)

        while len(undeployed_charm_classes()) > 0:
            update_pending_display()

            for charm_class in undeployed_charm_classes():
                self.ui.status_info_message(
                    "Checking if {c} is deployed".format(
                        c=charm_class.display_name))

                service_names = [s.service_name for s in
                                 self.juju_state.services]

                if charm_class.charm_name in service_names:
                    self.ui.status_info_message(
                        "{c} is already deployed, skipping".format(
                            c=charm_class.display_name))
                    self.deployed_charm_classes.append(charm_class)
                    continue

                err = self.try_deploy(charm_class)

                name = charm_class.display_name
                if err:
                    log.debug(
                        "{} is waiting for another service, will"
                        " re-try in a few seconds".format(name))
                    break
                else:
                    log.debug("Issued deploy for {}".format(name))
                    self.deployed_charm_classes.append(charm_class)

                self.juju_state.invalidate_status_cache()
                update_pending_display()

            num_remaining = len(undeployed_charm_classes())
            if num_remaining > 0:
                log.debug("{} charms pending deploy.".format(num_remaining))
                log.debug("deployed_charm_classes={}".format(
                    PrettyLog(self.deployed_charm_classes)))

                time.sleep(5)

        update_pending_display()

    def try_deploy(self, charm_class):
        "returns True if deploy is deferred and should be tried again."
        charm = charm_class(juju=self.juju, juju_state=self.juju_state,
                            ui=self.ui, config=self.config)

        asts = self.placement_controller.get_assignments(charm_class)
        errs = []
        first_deploy = True
        for atype, ml in asts.items():
            for machine in ml:
                mspec = self.get_machine_spec(machine, atype)
                if mspec is None:
                    errs.append(machine)
                    continue

                if first_deploy:
                    msg = "Deploying {c}".format(c=charm_class.display_name)
                    if mspec != '':
                        msg += " to machine {mspec}".format(mspec=mspec)
                    self.ui.status_info_message(msg)
                    deploy_err = charm.deploy(mspec)
                    if deploy_err:
                        errs.append(machine)
                    else:
                        first_deploy = False
                else:
                    # service already deployed, need to add-unit
                    msg = ("Adding one unit of "
                           "{c}".format(c=charm_class.display_name))
                    if mspec != '':
                        msg += " to machine {mspec}".format(mspec=mspec)
                    self.ui.status_info_message(msg)
                    deploy_err = charm.add_unit(machine_spec=mspec)
                    if deploy_err:
                        errs.append(machine)
                if not deploy_err:
                    self.placement_controller.mark_deployed(machine,
                                                            charm_class,
                                                            atype)

        had_err = len(errs) > 0
        if had_err and not self.config.getopt('headless'):
            log.warning("deferred deploying to these machines: {}".format(
                errs))
        return had_err

    def get_machine_spec(self, maas_machine, atype):
        """Given a machine and assignment type, return a juju machine spec.

        Returns None on errors, and '' for the subordinate char placeholder.
        """
        if self.placement_controller.is_placeholder(maas_machine.instance_id):
            # placeholder machines do not use a machine spec
            return ""

        jm = next((m for m in self.juju_state.machines()
                   if (m.instance_id == maas_machine.instance_id or
                       m.machine_id == maas_machine.machine_id)), None)
        if jm is None:
            log.error("could not find juju machine matching {}"
                      " (instance id {})".format(maas_machine,
                                                 maas_machine.instance_id))
            return None

        if atype == AssignmentType.BareMetal \
           or atype == AssignmentType.DEFAULT:
            return jm.machine_id
        elif atype == AssignmentType.LXC:
            return "lxc:{}".format(jm.machine_id)
        elif atype == AssignmentType.KVM:
            return "kvm:{}".format(jm.machine_id)
        else:
            log.error("unexpected atype: {}".format(atype))
            return None

    def wait_for_deployed_services_ready(self):
        """ Blocks until all deployed services attached units
        are in a 'started' state
        """
        if not self.juju_state:
            return

        self.ui.status_info_message(
            "Waiting for deployed services to be in a ready state.")

        not_ready_len = 0
        while not self.juju_state.all_agents_started():
            not_ready = [(a, b) for a, b in
                         self.juju_state.get_agent_states()
                         if b != 'started']
            if len(not_ready) == not_ready_len:
                time.sleep(3)
                continue

            not_ready_len = len(not_ready)
            log.info("Checking availability of {} ".format(
                ", ".join(["{}:{}".format(a, b) for a, b in not_ready])))
            time.sleep(3)

        self.ui.status_info_message(
            "Processing relations and finalizing services")

    def enqueue_deployed_charms(self):
        """Send all deployed charms to CharmQueue for relation setting and
        post-proc.
        """
        charm_q = CharmQueue(ui=self.ui, config=self.config,
                             juju=self.juju, juju_state=self.juju_state,
                             deployed_charms=self.deployed_charm_classes)

        if self.config.getopt('headless'):
            charm_q.watch_relations()
            charm_q.watch_post_proc()
        else:
            charm_q.watch_relations_async()
            charm_q.watch_post_proc_async()
        charm_q.is_running = True

        # Exit cleanly if we've finished all deploys, relations,
        # post processing, and running in headless mode.
        if self.config.getopt('headless'):
            while not self.config.getopt('deploy_complete'):
                self.ui.status_info_message(
                    "Waiting for services to be started.")
                # FIXME: Is this needed?
                # time.sleep(10)
            self.ui.status_info_message(
                "All services deployed, relations set, and started")
            self.loop.exit(0)

        self.ui.status_info_message(
            "Services deployed, relationships may still be"
            " pending. Please wait for all services to be checked before"
            " deploying compute nodes")
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

    @utils.async
    def deploy_new_services(self):
        """Deploys newly added services in background thread.
        Does not attempt to create new machines.
        """
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

        self.deploy_using_placement()
        self.wait_for_deployed_services_ready()
        self.set_unique_hostnames()
        self.enqueue_deployed_charms()

    def cancel_add_services(self):
        """User cancelled add-services screen.
        Just redisplay services view.
        """
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

    def start(self):
        """ Starts UI loop """
        if self.config.getopt('headless'):
            self.initialize()
        else:
            self.ui.status_info_message("Welcome")
            self.initialize()
            self.loop.register_callback('refresh_display', self.update)
            self.loop.set_alarm_in(0, self.update)
            self.loop.run()
            self.loop.close()
class Controller:

    """ Controller for Juju deployments and Maas machine init """

    def __init__(self, ui, config, loop):
        self.ui = ui
        self.ui.controller = self
        self.config = config
        self.loop = loop
        self.juju_state = None
        self.juju = None
        self.maas = None
        self.maas_state = None
        self.nodes = []
        self.juju_m_idmap = None  # for single, {instance_id: machine id}
        self.deployed_charm_classes = []
        self.placement_controller = None
        if not self.config.getopt('current_state'):
            self.config.setopt('current_state',
                               ControllerState.INSTALL_WAIT.value)

    def update(self, *args, **kwargs):
        """Render UI according to current state and reset timer

        PegasusGUI only.
        """
        interval = 1
        current_state = self.config.getopt('current_state')
        if current_state == ControllerState.PLACEMENT:
            self.ui.render_placement_view(self.loop, self.config,
                                          self.commit_placement)
        elif current_state == ControllerState.INSTALL_WAIT:
            if self.ui.node_install_wait_view is None:
                self.ui.render_node_install_wait(
                    message="Installer is initializing nodes. Please wait.")
            else:
                self.ui.node_install_wait_view.redraw_kitt()
            interval = self.config.node_install_wait_interval
        elif current_state == ControllerState.ADD_SERVICES:
            def submit_deploy():
                async.submit(self.deploy_new_services,
                             self.ui.show_exception_message)
            self.ui.render_add_services_dialog(
                submit_deploy, self.cancel_add_services)
        elif current_state == ControllerState.SERVICES:
            self.update_node_states()
        else:
            raise Exception("Internal error, unexpected display "
                            "state '{}'".format(current_state))
        self.loop.redraw_screen()
        AlarmMonitor.add_alarm(self.loop.set_alarm_in(interval, self.update),
                               "core-controller-update")

    def update_node_states(self):
        """ Updating node states

        PegasusGUI only
        """
        if not self.juju_state:
            return
        deployed_services = sorted(self.juju_state.services,
                                   key=attrgetter('service_name'))
        deployed_service_names = [s.service_name for s in deployed_services]

        charm_classes = sorted(
            [m.__charm_class__ for m in
             utils.load_charms(self.config.getopt('charm_plugin_dir'))
             if m.__charm_class__.charm_name in deployed_service_names],
            key=attrgetter('charm_name'))

        self.nodes = list(zip(charm_classes, deployed_services))

        if len(self.nodes) == 0:
            return
        else:
            if not self.ui.services_view:
                self.ui.render_services_view(
                    self.nodes, self.juju_state, self.maas_state, self.config)
            else:
                self.ui.refresh_services_view(self.nodes, self.config)

    def authenticate_juju(self):
        if not len(self.config.juju_env['state-servers']) > 0:
            state_server = 'localhost:17070'
        else:
            state_server = self.config.juju_env['state-servers'][0]
        self.juju = JujuClient(
            url=path.join('wss://', state_server),
            password=self.config.juju_api_password)
        self.juju.login()
        self.juju_state = JujuState(self.juju)
        log.debug('Authenticated against juju api.')

    def initialize(self):
        """Authenticates against juju/maas and sets up placement
        controller."""
        if getenv("FAKE_API_DATA"):
            self.juju_state = FakeJujuState()
            self.maas_state = FakeMaasState()
        else:
            self.authenticate_juju()
            if self.config.is_multi():
                creds = self.config.getopt('maascreds')
                self.maas, self.maas_state = connect_to_maas(creds)
        self.placement_controller = PlacementController(
            self.maas_state, self.config)

        if path.exists(self.config.placements_filename):
            try:
                with open(self.config.placements_filename, 'r') as pf:
                    self.placement_controller.load(pf)
            except Exception:
                log.exception("Exception loading placement")
                raise Exception("Could not load "
                                "{}.".format(self.config.placements_filename))
            self.ui.status_info_message("Loaded placements from file")
            log.info("Loaded placements from "
                     "'{}'".format(self.config.placements_filename))

            # If we have no machines (so we are a fresh install) but
            # are reading a placements.yaml from a previous install,
            # so it has no assignments, only deployments, tell the
            # controller to use the deployments in the file as
            # assignments:
            if len(self.placement_controller.machines_pending()) == 0 and \
               len(self.juju_state.machines()) == 0:
                self.placement_controller.set_assignments_from_deployments()
                log.info("Using deployments saved from previous install"
                         " as new assignments.")
        else:
            if self.config.is_multi():
                def_assignments = self.placement_controller.gen_defaults()
            else:
                def_assignments = self.placement_controller.gen_single()

            self.placement_controller.set_all_assignments(def_assignments)

        pfn = self.config.placements_filename
        self.placement_controller.set_autosave_filename(pfn)
        self.placement_controller.do_autosave()

        if self.config.is_single():
            if self.config.getopt('headless'):
                self.begin_deployment()
            else:
                async.submit(self.begin_deployment,
                             self.ui.show_exception_message)
            return

        if self.config.getopt('edit_placement') or \
           not self.placement_controller.can_deploy():
            self.config.setopt(
                'current_state', ControllerState.PLACEMENT.value)
        else:
            if self.config.getopt('headless'):
                self.begin_deployment()
            else:
                async.submit(self.begin_deployment,
                             self.ui.show_exception_message)

    def commit_placement(self):
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()
        if self.config.getopt('headless'):
            self.begin_deployment()
        else:
            async.submit(self.begin_deployment,
                         self.ui.show_exception_message)

    def begin_deployment(self):
        if self.config.is_multi():
            # now all machines are added
            self.maas.tag_fpi(self.maas.nodes)
            self.maas.nodes_accept_all()
            self.maas.tag_name(self.maas.nodes)

            while not self.all_maas_machines_ready():
                time.sleep(3)

            self.add_machines_to_juju_multi()

        elif self.config.is_single():
            self.add_machines_to_juju_single()

        # Quiet out some of the logging
        _previous_summary = None
        while not self.all_juju_machines_started():
            sd = self.juju_state.machines_summary()
            summary = ", ".join(["{} {}".format(v, k) for k, v
                                 in sd.items()])
            if summary != _previous_summary:
                self.ui.status_info_message("Waiting for machines to "
                                            "start: {}".format(summary))
                _previous_summary = summary
            async.sleep_until(1)

        if len(self.juju_state.machines()) == 0:
            raise Exception("Expected some juju machines started.")

        self.config.setopt('current_state', ControllerState.SERVICES.value)

        ppc = self.config.getopt("postproc_complete")
        rc = self.config.getopt("relations_complete")
        if not ppc or not rc:
            if self.config.is_single():
                controller_machine = self.juju_m_idmap['controller']
                self.configure_lxc_network(controller_machine)

            for juju_machine_id in self.juju_m_idmap.values():
                async.sleep_until(0)
                self.run_apt_go_fast(juju_machine_id)

            self.deploy_using_placement()
            self.wait_for_deployed_services_ready()
            self.enqueue_deployed_charms()
        else:
            self.ui.status_info_message("Ready")

    def all_maas_machines_ready(self):
        self.maas_state.invalidate_nodes_cache()

        cons = self.config.getopt('constraints')
        needed = set([m.instance_id for m in
                      self.placement_controller.machines_pending()])
        ready = set([m.instance_id for m in
                     self.maas_state.machines(MaasMachineStatus.READY,
                                              constraints=cons)])
        allocated = set([m.instance_id for m in
                         self.maas_state.machines(MaasMachineStatus.ALLOCATED,
                                                  constraints=cons)
                         ])

        summary = ", ".join(["{} {}".format(v, k) for k, v in
                             self.maas_state.machines_summary().items()])
        self.ui.status_info_message("Waiting for {} maas machines to be ready."
                                    " Machines Summary: {}".format(len(needed),
                                                                   summary))
        if not needed.issubset(ready.union(allocated)):
            return False
        return True

    def add_machines_to_juju_multi(self):
        """Adds each of the machines used for the placement to juju, if it
        isn't already there."""
        self.juju_state.invalidate_status_cache()
        juju_ids = [jm.instance_id for jm in self.juju_state.machines()]

        machine_params = []
        for maas_machine in self.placement_controller.machines_pending():
            if maas_machine.instance_id in juju_ids:
                # ignore machines that are already added to juju
                continue
            cd = dict(tags=[maas_machine.system_id])
            mp = dict(Series="", ContainerType="", ParentId="",
                      Constraints=cd, Jobs=[JujuJobs.HostUnits])
            machine_params.append(mp)

        if len(machine_params) > 0:
            import pprint
            log.debug("calling add_machines with params:"
                      " {}".format(pprint.pformat(machine_params)))
            rv = self.juju.add_machines(machine_params)
            log.debug("add_machines returned '{}'".format(rv))

    def all_juju_machines_started(self):
        self.juju_state.invalidate_status_cache()
        n_needed = len(self.placement_controller.machines_pending())
        n_allocated = len([jm for jm in self.juju_state.machines()
                           if jm.agent_state == 'started'])
        return n_allocated >= n_needed

    def add_machines_to_juju_single(self):
        self.juju_state.invalidate_status_cache()
        self.juju_m_idmap = {}
        for jm in self.juju_state.machines():
            response = self.juju.get_annotations(jm.machine_id, 'machine')
            ann = response['Annotations']
            if 'instance_id' in ann:
                self.juju_m_idmap[ann['instance_id']] = jm.machine_id

        log.debug("existing juju machines: {}".format(self.juju_m_idmap))

        def get_created_machine_id(iid, response):
            d = response['Machines'][0]
            if d['Error']:
                raise Exception("Error adding machine '{}':"
                                "{}".format(iid, response))
            else:
                return d['Machine']

        for machine in self.placement_controller.machines_pending():
            if machine.instance_id in self.juju_m_idmap:
                machine.machine_id = self.juju_m_idmap[machine.instance_id]
                log.debug("machine instance_id {} already exists as #{}, "
                          "skipping".format(machine.instance_id,
                                            machine.machine_id))
                continue
            log.debug("adding machine with "
                      "constraints={}".format(machine.constraints))
            rv = self.juju.add_machine(constraints=machine.constraints)
            m_id = get_created_machine_id(machine.instance_id, rv)
            machine.machine_id = m_id
            rv = self.juju.set_annotations(m_id, 'machine',
                                           {'instance_id':
                                            machine.instance_id})
            self.juju_m_idmap[machine.instance_id] = m_id

    def run_apt_go_fast(self, machine_id):
        utils.remote_cp(machine_id,
                        src=path.join(self.config.share_path,
                                      "tools/apt-go-fast"),
                        dst="/tmp/apt-go-fast",
                        juju_home=self.config.juju_home(use_expansion=True))
        utils.remote_run(machine_id,
                         cmds="sudo sh /tmp/apt-go-fast",
                         juju_home=self.config.juju_home(use_expansion=True))

    def configure_lxc_network(self, machine_id):
        # upload our lxc-host-only template and setup bridge
        log.info('Copying network specifications to machine')
        srcpath = path.join(self.config.tmpl_path, 'lxc-host-only')
        destpath = "/tmp/lxc-host-only"
        utils.remote_cp(machine_id, src=srcpath, dst=destpath,
                        juju_home=self.config.juju_home(use_expansion=True))

        log.debug('Updating network configuration for machine')
        utils.remote_run(machine_id,
                         cmds="sudo chmod +x /tmp/lxc-host-only",
                         juju_home=self.config.juju_home(use_expansion=True))
        utils.remote_run(machine_id,
                         cmds="sudo /tmp/lxc-host-only",
                         juju_home=self.config.juju_home(use_expansion=True))

    def deploy_using_placement(self):
        """Deploy charms using machine placement from placement controller,
        waiting for any deferred charms.  Then enqueue all charms for
        further processing and return.
        """
        self.ui.status_info_message("Verifying service deployments")
        assigned_ccs = self.placement_controller.assigned_charm_classes()
        charm_classes = sorted(assigned_ccs,
                               key=attrgetter('deploy_priority'))

        def undeployed_charm_classes():
            return [c for c in charm_classes
                    if c not in self.deployed_charm_classes]

        def update_pending_display():
            pending_names = [c.display_name
                             for c in undeployed_charm_classes()]
            self.ui.set_pending_deploys(pending_names)

        while len(undeployed_charm_classes()) > 0:
            update_pending_display()

            for charm_class in undeployed_charm_classes():
                self.ui.status_info_message(
                    "Checking if {c} is deployed".format(
                        c=charm_class.display_name))

                service_names = [s.service_name for s in
                                 self.juju_state.services]

                if charm_class.charm_name in service_names:
                    self.ui.status_info_message(
                        "{c} is already deployed, skipping".format(
                            c=charm_class.display_name))
                    self.deployed_charm_classes.append(charm_class)
                    continue

                err = self.try_deploy(charm_class)

                name = charm_class.display_name
                if err:
                    log.debug(
                        "{} is waiting for another service, will"
                        " re-try in a few seconds".format(name))
                    break
                else:
                    log.debug("Issued deploy for {}".format(name))
                    self.deployed_charm_classes.append(charm_class)

                self.juju_state.invalidate_status_cache()
                update_pending_display()

            num_remaining = len(undeployed_charm_classes())
            if num_remaining > 0:
                log.debug("{} charms pending deploy.".format(num_remaining))
                log.debug("deployed_charm_classes={}".format(
                    PrettyLog(self.deployed_charm_classes)))

                async.sleep_until(5)

        update_pending_display()

    def try_deploy(self, charm_class):
        "returns True if deploy is deferred and should be tried again."
        charm = charm_class(juju=self.juju, juju_state=self.juju_state,
                            ui=self.ui, config=self.config)

        asts = self.placement_controller.get_assignments(charm_class)
        errs = []
        first_deploy = True
        for atype, ml in asts.items():
            for machine in ml:
                mspec = self.get_machine_spec(machine, atype)
                if mspec is None:
                    errs.append(machine)
                    continue

                if first_deploy:
                    msg = "Deploying {c}".format(c=charm_class.display_name)
                    if mspec != '':
                        msg += " to machine {mspec}".format(mspec=mspec)
                    self.ui.status_info_message(msg)
                    deploy_err = charm.deploy(mspec)
                    if deploy_err:
                        errs.append(machine)
                    else:
                        first_deploy = False
                else:
                    # service already deployed, need to add-unit
                    msg = ("Adding one unit of "
                           "{c}".format(c=charm_class.display_name))
                    if mspec != '':
                        msg += " to machine {mspec}".format(mspec=mspec)
                    self.ui.status_info_message(msg)
                    deploy_err = charm.add_unit(machine_spec=mspec)
                    if deploy_err:
                        errs.append(machine)
                if not deploy_err:
                    self.placement_controller.mark_deployed(machine,
                                                            charm_class,
                                                            atype)

        had_err = len(errs) > 0
        if had_err and not self.config.getopt('headless'):
            log.warning("deferred deploying to these machines: {}".format(
                errs))
        return had_err

    def get_machine_spec(self, maas_machine, atype):
        """Given a machine and assignment type, return a juju machine spec.

        Returns None on errors, and '' for the subordinate char placeholder.
        """
        if self.placement_controller.is_placeholder(maas_machine.instance_id):
            # placeholder machines do not use a machine spec
            return ""

        jm = next((m for m in self.juju_state.machines()
                   if (m.instance_id == maas_machine.instance_id or
                       m.machine_id == maas_machine.machine_id)), None)
        if jm is None:
            log.error("could not find juju machine matching {}"
                      " (instance id {})".format(maas_machine,
                                                 maas_machine.instance_id))
            return None

        if atype == AssignmentType.BareMetal \
           or atype == AssignmentType.DEFAULT:
            return jm.machine_id
        elif atype == AssignmentType.LXC:
            return "lxc:{}".format(jm.machine_id)
        elif atype == AssignmentType.KVM:
            return "kvm:{}".format(jm.machine_id)
        else:
            log.error("unexpected atype: {}".format(atype))
            return None

    def wait_for_deployed_services_ready(self):
        """ Blocks until all deployed services attached units
        are in a 'started' state
        """
        if not self.juju_state:
            return

        self.ui.status_info_message(
            "Waiting for deployed services to be in a ready state.")

        not_ready_len = 0
        while not self.juju_state.all_agents_started():
            not_ready = [(a, b) for a, b in
                         self.juju_state.get_agent_states()
                         if b != 'started']
            if len(not_ready) == not_ready_len:
                async.sleep_until(3)
                continue

            not_ready_len = len(not_ready)
            log.info("Checking availability of {} ".format(
                ", ".join(["{}:{}".format(a, b) for a, b in not_ready])))
            async.sleep_until(3)

        self.config.setopt('deploy_complete', True)
        self.ui.status_info_message(
            "Processing relations and finalizing services")

    def enqueue_deployed_charms(self):
        """Send all deployed charms to CharmQueue for relation setting and
        post-proc.
        """
        charm_q = CharmQueue(ui=self.ui, config=self.config,
                             juju=self.juju, juju_state=self.juju_state,
                             deployed_charms=self.deployed_charm_classes)

        if self.config.getopt('headless'):
            charm_q.watch_relations()
            charm_q.watch_post_proc()
        else:
            async.submit(charm_q.watch_relations,
                         self.ui.show_exception_message)
            async.submit(charm_q.watch_post_proc,
                         self.ui.show_exception_message)
        charm_q.is_running = True

        # Exit cleanly if we've finished all deploys, relations,
        # post processing, and running in headless mode.
        if self.config.getopt('headless'):
            while not self.config.getopt('postproc_complete'):
                self.ui.status_info_message(
                    "Waiting for services to be started.")
                # FIXME: Is this needed?
                # time.sleep(10)
            self.ui.status_info_message(
                "All services deployed, relations set, and started")
            self.loop.exit(0)

        self.ui.status_info_message(
            "Services deployed, relationships still pending."
            " Please wait for all relations to be set before"
            " deploying additional services.")
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

    def deploy_new_services(self):
        """Deploys newly added services in background thread.
        Does not attempt to create new machines.
        """
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

        self.deploy_using_placement()
        self.wait_for_deployed_services_ready()
        self.enqueue_deployed_charms()

    def cancel_add_services(self):
        """User cancelled add-services screen.
        Just redisplay services view.
        """
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

    def start(self):
        """ Starts UI loop """
        if self.config.getopt('headless'):
            self.initialize()
        else:
            self.ui.status_info_message("Welcome")
            rel = self.config.getopt('openstack_release')
            label = OPENSTACK_RELEASE_LABELS[rel]
            self.ui.set_openstack_rel(label)
            self.initialize()
            self.loop.register_callback('refresh_display', self.update)
            AlarmMonitor.add_alarm(self.loop.set_alarm_in(0, self.update),
                                   "controller-start")
            self.config.setopt("gui_started", True)
            self.loop.run()
            self.loop.close()
class PlacementControllerTestCase(unittest.TestCase):

    def setUp(self):
        self.mock_maas_state = MagicMock()
        with NamedTemporaryFile(mode='w+', encoding='utf-8') as tempf:
            utils.spew(tempf.name, yaml.dump(dict()))
            self.conf = Config({}, tempf.name)

        self.pc = PlacementController(self.mock_maas_state, self.conf)
        self.mock_machine = MagicMock(name='machine1')
        pmid = PropertyMock(return_value='fake-instance-id-1')
        type(self.mock_machine).instance_id = pmid

        self.mock_machine_2 = MagicMock(name='machine2')
        pmid2 = PropertyMock(return_value='fake-instance-id-2')
        type(self.mock_machine_2).instance_id = pmid2

        self.mock_machines = [self.mock_machine, self.mock_machine_2]

        self.mock_maas_state.machines.return_value = self.mock_machines

    def test_get_assignments_atype(self):
        self.assertEqual(0, len(self.pc.get_assignments(CharmNovaCompute)))
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        md = self.pc.get_assignments(CharmNovaCompute)
        self.assertEqual(1, len(md))
        self.assertEqual(2, len(md[AssignmentType.LXC]))

    def _do_test_simple_assign_type(self, assignment_type):
        self.pc.assign(self.mock_machine, CharmNovaCompute, assignment_type)
        print("assignments is {}".format(self.pc.assignments))
        machines = self.pc.get_assignments(CharmNovaCompute)
        print('machines for charm is {}'.format(machines))
        self.assertEqual(machines,
                         {assignment_type: [self.mock_machine]})

        ma = self.pc.assignments_for_machine(self.mock_machine)

        self.assertEqual(ma[assignment_type], [CharmNovaCompute])

    def test_simple_assign_bare(self):
        self._do_test_simple_assign_type(AssignmentType.BareMetal)

    def test_simple_assign_lxc(self):
        self._do_test_simple_assign_type(AssignmentType.LXC)

    def test_simple_assign_kvm(self):
        self._do_test_simple_assign_type(AssignmentType.KVM)

    def test_assign_nonmulti(self):
        self.pc.assign(self.mock_machine, CharmKeystone, AssignmentType.LXC)
        self.assertEqual(self.pc.get_assignments(CharmKeystone),
                         {AssignmentType.LXC: [self.mock_machine]})

        self.pc.assign(self.mock_machine, CharmKeystone, AssignmentType.KVM)
        self.assertEqual(self.pc.get_assignments(CharmKeystone),
                         {AssignmentType.KVM: [self.mock_machine]})

        am = self.pc.assignments_for_machine(self.mock_machine)
        self.assertEqual(am[AssignmentType.KVM], [CharmKeystone])
        self.assertEqual(am[AssignmentType.LXC], [])

    def test_assign_multi(self):
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.assertEqual(self.pc.get_assignments(CharmNovaCompute),
                         {AssignmentType.LXC: [self.mock_machine]})

        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.KVM)
        self.assertEqual(self.pc.get_assignments(CharmNovaCompute),
                         {AssignmentType.LXC: [self.mock_machine],
                          AssignmentType.KVM: [self.mock_machine]})

        ma = self.pc.assignments_for_machine(self.mock_machine)
        self.assertEqual(ma[AssignmentType.LXC], [CharmNovaCompute])
        self.assertEqual(ma[AssignmentType.KVM], [CharmNovaCompute])

    def test_remove_assignment_multi(self):
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.pc.assign(self.mock_machine_2, CharmNovaCompute,
                       AssignmentType.LXC)

        mfc = self.pc.get_assignments(CharmNovaCompute)
        mfc_lxc = set(mfc[AssignmentType.LXC])
        self.assertEqual(mfc_lxc, set(self.mock_machines))

        self.pc.clear_assignments(self.mock_machine)
        self.assertEqual(self.pc.get_assignments(CharmNovaCompute),
                         {AssignmentType.LXC: [self.mock_machine_2]})

    def test_gen_defaults(self):
        satisfies_importstring = 'cloudinstall.placement.controller.satisfies'
        with patch(satisfies_importstring) as mock_satisfies:
            mock_satisfies.return_value = (True, )
            defs = self.pc.gen_defaults(charm_classes=[CharmNovaCompute,
                                                       CharmKeystone],
                                        maas_machines=[self.mock_machine,
                                                       self.mock_machine_2])
            m1_as = defs[self.mock_machine.instance_id]
            m2_as = defs[self.mock_machine_2.instance_id]

            self.assertEqual(m1_as[AssignmentType.BareMetal],
                             [CharmNovaCompute])
            self.assertEqual(m1_as[AssignmentType.LXC], [])
            self.assertEqual(m1_as[AssignmentType.KVM], [])

            self.assertEqual(m2_as[AssignmentType.BareMetal], [])
            self.assertEqual(m2_as[AssignmentType.LXC], [CharmKeystone])
            self.assertEqual(m2_as[AssignmentType.KVM], [])

    def test_remove_one_assignment_sametype(self):
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)

        self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute)
        md = self.pc.assignments[self.mock_machine.instance_id]
        lxcs = md[AssignmentType.LXC]
        self.assertEqual(lxcs, [CharmNovaCompute])

        self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute)
        md = self.pc.assignments[self.mock_machine.instance_id]
        lxcs = md[AssignmentType.LXC]
        self.assertEqual(lxcs, [])

    def test_remove_one_assignment_othertype(self):
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.KVM)

        self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute)
        md = self.pc.assignments[self.mock_machine.instance_id]
        lxcs = md[AssignmentType.LXC]
        kvms = md[AssignmentType.KVM]
        self.assertEqual(1, len(lxcs) + len(kvms))

        self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute)
        md = self.pc.assignments[self.mock_machine.instance_id]
        lxcs = md[AssignmentType.LXC]
        kvms = md[AssignmentType.KVM]
        self.assertEqual(0, len(lxcs) + len(kvms))

    def test_clear_all(self):
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.pc.assign(self.mock_machine_2, CharmNovaCompute,
                       AssignmentType.KVM)
        self.pc.clear_all_assignments()
        # check that it's empty:
        self.assertEqual(self.pc.assignments, {})
        # and that it's still a defaultdict(lambda: defaultdict(list))
        mid = self.mock_machine.machine_id
        lxcs = self.pc.assignments[mid][AssignmentType.LXC]
        self.assertEqual(lxcs, [])

    def test_unassigned_starts_full(self):
        self.assertEqual(len(self.pc.unassigned_undeployed_services()),
                         len(self.pc.charm_classes()))

    def test_assigned_charm_classes_starts_empty(self):
        self.assertEqual(0, len(self.pc.assigned_charm_classes()))

    def test_reset_unassigned_undeployed_none(self):
        """Assign all charms, ensure that unassigned is empty"""
        for cc in self.pc.charm_classes():
            self.pc.assign(self.mock_machine, cc, AssignmentType.LXC)

        self.pc.reset_assigned_deployed()

        self.assertEqual(0, len(self.pc.unassigned_undeployed_services()))

    def test_reset_unassigned_undeployed_two(self):
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.pc.assign(self.mock_machine_2, CharmKeystone,
                       AssignmentType.KVM)

        self.pc.reset_assigned_deployed()

        self.assertEqual(len(self.pc.charm_classes()) - 2,
                         len(self.pc.unassigned_undeployed_services()))

    def test_reset_excepting_compute(self):
        for cc in self.pc.charm_classes():
            if cc.charm_name == 'nova-compute':
                continue
            self.pc.assign(self.mock_machine, cc, AssignmentType.LXC)

        self.pc.reset_assigned_deployed()
        self.assertEqual(len(self.pc.unassigned_undeployed_services()), 1)

    def test_unassigned_undeployed(self):
        all_charms = set(self.pc.charm_classes())
        self.pc.assign(self.mock_machine, CharmKeystone,
                       AssignmentType.KVM)
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.KVM)
        self.pc.mark_deployed(self.mock_machine, CharmKeystone,
                              AssignmentType.KVM)

        self.assertTrue(CharmKeystone not in
                        self.pc.unassigned_undeployed_services())
        self.assertTrue(CharmNovaCompute not in
                        self.pc.unassigned_undeployed_services())

        self.assertTrue(self.pc.is_deployed(CharmKeystone))
        self.assertTrue(self.pc.is_assigned(CharmNovaCompute))

        self.assertTrue(len(all_charms) - 2,
                        len(self.pc.unassigned_undeployed_services()))

        n_k_as = self.pc.assignment_machine_count_for_charm(CharmKeystone)
        self.assertEqual(n_k_as, 0)
        n_k_dl = self.pc.deployment_machine_count_for_charm(CharmKeystone)
        self.assertEqual(n_k_dl, 1)
        n_nc_as = self.pc.assignment_machine_count_for_charm(CharmNovaCompute)
        self.assertEqual(n_nc_as, 1)
        n_nc_dl = self.pc.deployment_machine_count_for_charm(CharmNovaCompute)
        self.assertEqual(n_nc_dl, 0)

    def test_deployed_charms_starts_empty(self):
        "Initially there are no deployed charms"
        self.assertEqual(0, len(self.pc.deployed_charm_classes()))

    def test_mark_deployed_unsets_assignment(self):
        "Setting a placement to deployed removes it from assignment dict"
        self.pc.assign(self.mock_machine, CharmKeystone, AssignmentType.KVM)
        self.assertEqual([CharmKeystone], self.pc.assigned_charm_classes())
        self.pc.mark_deployed(self.mock_machine, CharmKeystone,
                              AssignmentType.KVM)
        self.assertEqual([CharmKeystone], self.pc.deployed_charm_classes())
        self.assertEqual([], self.pc.assigned_charm_classes())

    def test_set_deployed_unsets_assignment_only_once(self):
        "Setting a placement to deployed removes it from assignment dict"
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.KVM)
        self.pc.assign(self.mock_machine_2, CharmNovaCompute,
                       AssignmentType.KVM)
        self.assertEqual([CharmNovaCompute], self.pc.assigned_charm_classes())
        ad = self.pc.get_assignments(CharmNovaCompute)
        dd = self.pc.get_deployments(CharmNovaCompute)
        from pprint import pformat
        print("Assignments is {}".format(pformat(ad)))
        print("Deployments is {}".format(pformat(dd)))
        self.assertEqual(set([self.mock_machine, self.mock_machine_2]),
                         set(ad[AssignmentType.KVM]))
        self.assertEqual(len(dd.items()), 0)

        self.pc.mark_deployed(self.mock_machine, CharmNovaCompute,
                              AssignmentType.KVM)
        self.assertEqual([CharmNovaCompute], self.pc.deployed_charm_classes())
        self.assertEqual([CharmNovaCompute], self.pc.assigned_charm_classes())
        ad = self.pc.get_assignments(CharmNovaCompute)
        dd = self.pc.get_deployments(CharmNovaCompute)
        self.assertEqual([self.mock_machine_2], ad[AssignmentType.KVM])
        self.assertEqual([self.mock_machine], dd[AssignmentType.KVM])

    def test_get_charm_state(self):
        "Test a sampling of required services and special handling for compute"
        self.assertEqual(self.pc.get_charm_state(CharmKeystone)[0],
                         CharmState.REQUIRED)
        self.assertEqual(self.pc.get_charm_state(CharmNovaCompute)[0],
                         CharmState.REQUIRED)

    def test_one_compute_required(self):
        """after being assigned at least once, novacompute is no longer
        considered 'required' (aka required)"""
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.assertNotEqual(self.pc.get_charm_state(CharmNovaCompute)[0],
                            CharmState.REQUIRED)

    def test_swift_unrequired_then_required_default(self):
        "Swift and swift-proxy are both optional until you add swift"
        self.assertEqual(CharmState.OPTIONAL,
                         self.pc.get_charm_state(CharmSwift)[0])
        self.assertEqual(CharmState.OPTIONAL,
                         self.pc.get_charm_state(CharmSwiftProxy)[0])
        self.pc.assign(self.mock_machine, CharmSwift, AssignmentType.LXC)
        self.assertEqual(CharmState.REQUIRED,
                         self.pc.get_charm_state(CharmSwift)[0])
        self.assertEqual(CharmState.REQUIRED,
                         self.pc.get_charm_state(CharmSwiftProxy)[0])

    def test_swift_proxy_unrequired_then_required_default(self):
        "Swift and swift-proxy are both optional until you add swift-proxy"
        self.assertEqual(CharmState.OPTIONAL,
                         self.pc.get_charm_state(CharmSwift)[0])
        self.assertEqual(CharmState.OPTIONAL,
                         self.pc.get_charm_state(CharmSwiftProxy)[0])
        self.pc.assign(self.mock_machine, CharmSwiftProxy,
                       AssignmentType.LXC)
        self.assertEqual(CharmState.REQUIRED,
                         self.pc.get_charm_state(CharmSwift)[0])
        # Only one swift-proxy is required, so now that we've added
        # it, it is still not required:
        self.assertEqual(CharmState.OPTIONAL,
                         self.pc.get_charm_state(CharmSwiftProxy)[0])

    def test_ceph_num_required(self):
        "3 units of ceph should be required after having been assigned"
        state, cons, deps = self.pc.get_charm_state(CharmCeph)
        self.assertEqual(state, CharmState.OPTIONAL)
        self.pc.assign(self.mock_machine, CharmCeph, AssignmentType.KVM)
        self.assertEqual(self.pc.get_charm_state(CharmCeph)[0],
                         CharmState.REQUIRED)
        self.pc.assign(self.mock_machine, CharmCeph, AssignmentType.KVM)
        self.pc.assign(self.mock_machine, CharmCeph, AssignmentType.KVM)
        self.assertEqual(self.pc.get_charm_state(CharmCeph)[0],
                         CharmState.OPTIONAL)

    def test_persistence(self):
        self.pc.assign(self.mock_machine, CharmNovaCompute,
                       AssignmentType.LXC)
        self.pc.assign(self.mock_machine_2, CharmKeystone,
                       AssignmentType.KVM)
        cons1 = PropertyMock(return_value={})
        type(self.mock_machine).constraints = cons1
        cons2 = PropertyMock(return_value={'cpu': 8})
        type(self.mock_machine_2).constraints = cons2

        with TemporaryFile(mode='w+', encoding='utf-8') as tempf:
            self.pc.save(tempf)
            tempf.seek(0)
            print(tempf.read())
            tempf.seek(0)
            newpc = PlacementController(self.mock_maas_state, self.conf)
            newpc.load(tempf)
        self.assertEqual(self.pc.assignments, newpc.assignments)
        self.assertEqual(self.pc.machines_pending(),
                         newpc.machines_pending())
        self.assertEqual(self.pc.assigned_charm_classes(),
                         newpc.assigned_charm_classes())

        m2 = next((m for m in newpc.machines_pending()
                   if m.instance_id == 'fake-instance-id-2'))
        self.assertEqual(m2.constraints, {'cpu': 8})

    def test_load_machines_single(self):
        with NamedTemporaryFile(mode='w+', encoding='utf-8') as tempf:
            utils.spew(tempf.name, yaml.dump(dict()))
            conf = Config({}, tempf.name)

        fake_assignments = {
            'fake_iid': {'constraints': {},
                         'assignments': {'KVM': ['nova-compute']}},
            'fake_iid_2': {'constraints': {'cpu': 8},
                           'assignments': {'BareMetal': ['nova-compute']}}}
        singlepc = PlacementController(None, conf)

        with TemporaryFile(mode='w+', encoding='utf-8') as tempf:
            yaml.dump(fake_assignments, tempf)
            tempf.seek(0)
            singlepc.load(tempf)

        self.assertEqual(set([m.instance_id for m in
                              singlepc.machines_pending()]),
                         set(['fake_iid', 'fake_iid_2']))

        m2 = next((m for m in singlepc.machines_pending()
                   if m.instance_id == 'fake_iid_2'))
        self.assertEqual(m2.constraints, {'cpu': 8})

    def test_load_error_mismatch_charm_name(self):
        """Should safely ignore (and log) a charm name in a placement file
        that can't be matched to a loaded charm class."""
        singlepc = PlacementController(None, self.conf)

        fake_assignments = {
            'fake_iid': {
                'constraints': {},
                'assignments': {'KVM': ['non-existent']}},
            'fake_iid_2': {
                'constraints': {'cpu': 8},
                'assignments': {'BareMetal': ['nova-compute']}}}

        with TemporaryFile(mode='w+', encoding='utf-8') as tempf:
            yaml.dump(fake_assignments, tempf)
            tempf.seek(0)
            singlepc.load(tempf)

        self.assertEqual(set([m.instance_id for m in
                              singlepc.machines_pending()]),
                         set(['fake_iid_2']))

        m2 = next((m for m in singlepc.machines_pending()
                   if m.instance_id == 'fake_iid_2'))
        self.assertEqual(m2.constraints, {'cpu': 8})

    def test_is_assigned_to_is_deployed_to(self):
        self.assertFalse(self.pc.is_assigned_to(CharmSwiftProxy,
                                                self.mock_machine))
        self.assertFalse(self.pc.is_deployed_to(CharmSwiftProxy,
                                                self.mock_machine))
        self.pc.assign(self.mock_machine, CharmSwiftProxy,
                       AssignmentType.LXC)
        self.assertFalse(self.pc.is_deployed_to(CharmSwiftProxy,
                                                self.mock_machine))
        self.assertTrue(self.pc.is_assigned_to(CharmSwiftProxy,
                                               self.mock_machine))
        self.pc.mark_deployed(self.mock_machine, CharmSwiftProxy,
                              AssignmentType.LXC)
        self.assertTrue(self.pc.is_deployed_to(CharmSwiftProxy,
                                               self.mock_machine))
        self.assertFalse(self.pc.is_assigned_to(CharmSwiftProxy,
                                                self.mock_machine))

    def test_double_clear_ok(self):
        """clearing assignments for a machine that isn't assigned (anymore)
        is OK and should do nothing
        """
        self.pc.assign(self.mock_machine, CharmSwiftProxy,
                       AssignmentType.LXC)
        self.pc.clear_assignments(self.mock_machine)
        self.pc.clear_assignments(self.mock_machine)
        self.pc.clear_assignments(self.mock_machine_2)

    def test_gen_defaults_raises_with_no_maas_state(self):
        pc = PlacementController(None, self.conf)
        self.assertRaises(PlacementError, pc.gen_defaults)

    # FIXME: Not sure whats going on with this test, assume
    # its from the maas-tag addition. Also not sure why
    # it fails now and not in previous commit?
    @unittest.skip
    def test_gen_defaults_uses_only_ready(self):
        """gen_defaults should only use ready machines"""
        mock_maas_state = MagicMock()
        mock_maas_state.machines.return_value = []
        c = Config()
        pc = PlacementController(config=c, maas_state=mock_maas_state)
        # reset the mock to avoid looking at calls from
        # PlacementController.__init__().
        mock_maas_state.reset_mock()
        pc.gen_defaults()
        # we simply check the first call because we know that
        # follow-on calls are from calls to get_assignments and do
        # not affect machines used for defaults
        self.assertEqual(mock_maas_state.machines.mock_calls[0],
                         call(MaasMachineStatus.READY))

    def test_gen_single_backends(self):
        "gen_single has no storage backend by default"
        def find_charm(cn, defs):
            allcharms = []
            for mname, ad in defs.items():
                for atype, charmclasses in ad.items():
                    allcharms += charmclasses
            return cn in allcharms

        c = Config()
        pc = PlacementController(config=c)
        defaults = pc.gen_single()
        self.assertFalse(find_charm(CharmSwiftProxy, defaults))
        self.assertFalse(find_charm(CharmSwift, defaults))
        self.assertFalse(find_charm(CharmCeph, defaults))
        self.assertFalse(find_charm(CharmCephOSD, defaults))
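# Illustrative sketch only, not part of the original test suite: the
# placements file that test_persistence saves and test_load_machines_single
# loads is a YAML mapping of instance id -> {'constraints', 'assignments'},
# with assignment-type names and charm names stored as strings. The ids and
# values below are hypothetical, mirroring the fake_assignments fixtures
# above; the authoritative format is whatever PlacementController.save()
# actually emits.
def _example_placements_yaml():
    """Return a YAML string shaped like the files the load tests build."""
    import yaml
    example = {
        'fake_iid': {'constraints': {},
                     'assignments': {'KVM': ['nova-compute']}},
        'fake_iid_2': {'constraints': {'cpu': 8},
                       'assignments': {'BareMetal': ['nova-compute']}},
    }
    # A file with this content can be read back via
    # PlacementController(None, conf).load(fileobj), as exercised in
    # test_load_machines_single above.
    return yaml.dump(example)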
class Controller:

    """ Controller for Juju deployments and Maas machine init """

    def __init__(self, ui, config, loop):
        self.ui = ui
        self.ui.controller = self
        self.config = config
        self.loop = loop
        self.juju_state = None
        self.juju = None
        self.maas = None
        self.maas_state = None
        self.nodes = []
        self.juju_m_idmap = None  # for single, {instance_id: machine id}
        self.deployed_charm_classes = []
        self.placement_controller = None
        if not self.config.getopt('current_state'):
            self.config.setopt('current_state',
                               ControllerState.INSTALL_WAIT.value)

    def update(self, *args, **kwargs):
        """Render UI according to current state and reset timer

        PegasusGUI only.
        """
        interval = 1
        current_state = self.config.getopt('current_state')
        if current_state == ControllerState.PLACEMENT:
            self.ui.render_placement_view(self.loop, self.config,
                                          self.commit_placement)
        elif current_state == ControllerState.INSTALL_WAIT:
            if self.ui.node_install_wait_view is None:
                self.ui.render_node_install_wait(
                    message="Installer is initializing nodes. Please wait.")
            else:
                self.ui.node_install_wait_view.redraw_kitt()
            interval = self.config.node_install_wait_interval
        elif current_state == ControllerState.ADD_SERVICES:
            def submit_deploy():
                async.submit(self.deploy_new_services,
                             self.ui.show_exception_message)
            self.ui.render_add_services_dialog(
                submit_deploy, self.cancel_add_services)
        elif current_state == ControllerState.SERVICES:
            self.update_node_states()
        else:
            raise Exception("Internal error, unexpected display "
                            "state '{}'".format(current_state))
        self.loop.redraw_screen()
        AlarmMonitor.add_alarm(self.loop.set_alarm_in(interval, self.update),
                               "core-controller-update")

    def update_node_states(self):
        """ Updating node states

        PegasusGUI only
        """
        if not self.juju_state:
            return
        deployed_services = sorted(self.juju_state.services,
                                   key=attrgetter('service_name'))
        deployed_service_names = [s.service_name for s in deployed_services]

        charm_classes = sorted(
            [m.__charm_class__ for m in
             utils.load_charms(self.config.getopt('charm_plugin_dir'))
             if m.__charm_class__.charm_name in deployed_service_names],
            key=attrgetter('charm_name'))

        self.nodes = list(zip(charm_classes, deployed_services))

        if len(self.nodes) == 0:
            return
        else:
            if not self.ui.services_view:
                self.ui.render_services_view(
                    self.nodes, self.juju_state, self.maas_state, self.config)
            else:
                self.ui.refresh_services_view(self.nodes, self.config)

    def authenticate_juju(self):
        uuid = self.config.juju_env['environ-uuid']
        if not len(self.config.juju_env['state-servers']) > 0:
            state_server = 'localhost:17070'
        else:
            state_server = self.config.juju_env['state-servers'][0]
        url = path.join('wss://', state_server, 'environment', uuid, 'api')
        self.juju = JujuClient(
            url=url,
            password=self.config.juju_api_password)
        self.juju.login()
        self.juju_state = JujuState(self.juju)
        log.debug('Authenticated against Juju: {}'.format(url))

    def initialize(self):
        """Authenticates against juju/maas and sets up placement
        controller."""
        if getenv("FAKE_API_DATA"):
            self.juju_state = FakeJujuState()
            self.maas_state = FakeMaasState()
        else:
            self.authenticate_juju()
            if self.config.is_multi():
                creds = self.config.getopt('maascreds')
                self.maas, self.maas_state = connect_to_maas(creds)
        self.placement_controller = PlacementController(
            self.maas_state, self.config)

        if path.exists(self.config.placements_filename):
            try:
                with open(self.config.placements_filename, 'r') as pf:
                    self.placement_controller.load(pf)
            except Exception:
                log.exception("Exception loading placement")
                raise Exception("Could not load "
                                "{}.".format(self.config.placements_filename))
            self.ui.status_info_message("Loaded placements from file")
            log.info("Loaded placements from "
                     "'{}'".format(self.config.placements_filename))

            # If we have no machines (so we are a fresh install) but
            # are reading a placements.yaml from a previous install,
            # so it has no assignments, only deployments, tell the
            # controller to use the deployments in the file as
            # assignments:
            if len(self.placement_controller.machines_pending()) == 0 and \
               len(self.juju_state.machines()) == 0:
                self.placement_controller.set_assignments_from_deployments()
                log.info("Using deployments saved from previous install"
                         " as new assignments.")
        else:
            if self.config.is_multi():
                def_assignments = self.placement_controller.gen_defaults()
            else:
                def_assignments = self.placement_controller.gen_single()

            self.placement_controller.set_all_assignments(def_assignments)

        pfn = self.config.placements_filename
        self.placement_controller.set_autosave_filename(pfn)
        self.placement_controller.do_autosave()

        if self.config.is_single():
            if self.config.getopt('headless'):
                self.begin_deployment()
            else:
                async.submit(self.begin_deployment,
                             self.ui.show_exception_message)
            return

        if self.config.getopt('edit_placement') or \
           not self.placement_controller.can_deploy():
            self.config.setopt(
                'current_state', ControllerState.PLACEMENT.value)
        else:
            if self.config.getopt('headless'):
                self.begin_deployment()
            else:
                async.submit(self.begin_deployment,
                             self.ui.show_exception_message)

    def commit_placement(self):
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()
        if self.config.getopt('headless'):
            self.begin_deployment()
        else:
            async.submit(self.begin_deployment,
                         self.ui.show_exception_message)

    def begin_deployment(self):
        if self.config.is_multi():
            # now all machines are added
            self.maas.tag_fpi(self.maas.nodes)
            self.maas.nodes_accept_all()
            self.maas.tag_name(self.maas.nodes)

            while not self.all_maas_machines_ready():
                time.sleep(3)

            self.add_machines_to_juju_multi()

        elif self.config.is_single():
            self.add_machines_to_juju_single()

        # Quiet out some of the logging
        _previous_summary = None
        while not self.all_juju_machines_started():
            sd = self.juju_state.machines_summary()
            summary = ", ".join(["{} {}".format(v, k) for k, v
                                 in sd.items()])
            if summary != _previous_summary:
                self.ui.status_info_message("Waiting for machines to "
                                            "start: {}".format(summary))
                _previous_summary = summary
            async.sleep_until(1)

        if len(self.juju_state.machines()) == 0:
            raise Exception("Expected some juju machines started.")

        self.config.setopt('current_state', ControllerState.SERVICES.value)

        ppc = self.config.getopt("postproc_complete")
        rc = self.config.getopt("relations_complete")
        if not ppc or not rc:
            if self.config.is_single():
                controller_machine = self.juju_m_idmap['controller']
                self.configure_lxc_network(controller_machine)

                # Add second nic to VMS after lxc network
                # is configured
                if not self.config.getopt('attached_interfaces'):
                    for i in range(1, 4):
                        additional_iface_mac = utils.macgen()
                        cmd = ("virsh attach-interface --domain "
                               "ubuntu-local-machine-{} "
                               "--type bridge --source lxcbr0 --model virtio "
                               "--mac {} --config --live".format(
                                   i, additional_iface_mac))
                        log.debug("Adding second interface "
                                  "to machine: {}".format(cmd))
                        out = utils.get_command_output(cmd)
                        log.debug("Result: {}".format(out))
                        utils.remote_run(
                            i,
                            cmds="sudo /sbin/sysctl -w net.ipv4.ip_forward=1",
                            juju_home=self.config.juju_home(
                                use_expansion=True))
                    self.config.setopt('attached_interfaces', True)

            self.deploy_using_placement()
            self.wait_for_deployed_services_ready()
            self.enqueue_deployed_charms()
        else:
            self.ui.status_info_message("Ready")

    def all_maas_machines_ready(self):
        self.maas_state.invalidate_nodes_cache()

        cons = self.config.getopt('constraints')
        needed = set([m.instance_id for m in
                      self.placement_controller.machines_pending()])
        ready = set([m.instance_id for m in
                     self.maas_state.machines(MaasMachineStatus.READY,
                                              constraints=cons)])
        allocated = set([m.instance_id for m in
                         self.maas_state.machines(MaasMachineStatus.ALLOCATED,
                                                  constraints=cons)
                         ])

        summary = ", ".join(["{} {}".format(v, k) for k, v in
                             self.maas_state.machines_summary().items()])
        self.ui.status_info_message("Waiting for {} maas machines to be ready."
                                    " Machines Summary: {}".format(len(needed),
                                                                   summary))
        if not needed.issubset(ready.union(allocated)):
            return False
        return True

    def add_machines_to_juju_multi(self):
        """Adds each of the machines used for the placement to juju, if it
        isn't already there."""
        self.juju_state.invalidate_status_cache()
        juju_ids = [jm.instance_id for jm in self.juju_state.machines()]

        machine_params = []
        for maas_machine in self.placement_controller.machines_pending():
            if maas_machine.instance_id in juju_ids:
                # ignore machines that are already added to juju
                continue
            cd = dict(tags=[maas_machine.system_id])
            mp = dict(Series="", ContainerType="", ParentId="",
                      Constraints=cd, Jobs=[JujuJobs.HostUnits])
            machine_params.append(mp)

        if len(machine_params) > 0:
            import pprint
            log.debug("calling add_machines with params:"
                      " {}".format(pprint.pformat(machine_params)))
            rv = self.juju.add_machines(machine_params)
            log.debug("add_machines returned '{}'".format(rv))

    def all_juju_machines_started(self):
        self.juju_state.invalidate_status_cache()
        n_needed = len(self.placement_controller.machines_pending())
        n_allocated = len([jm for jm in self.juju_state.machines()
                           if jm.agent_state == 'started'])
        return n_allocated >= n_needed

    def add_machines_to_juju_single(self):
        self.juju_state.invalidate_status_cache()
        self.juju_m_idmap = {}
        for jm in self.juju_state.machines():
            response = self.juju.get_annotations(jm.machine_id, 'machine')
            ann = response['Annotations']
            if 'instance_id' in ann:
                self.juju_m_idmap[ann['instance_id']] = jm.machine_id

        log.debug("existing juju machines: {}".format(self.juju_m_idmap))

        def get_created_machine_id(iid, response):
            d = response['Machines'][0]
            if d['Error']:
                raise Exception("Error adding machine '{}':"
                                "{}".format(iid, response))
            else:
                return d['Machine']

        for machine in self.placement_controller.machines_pending():
            if machine.instance_id in self.juju_m_idmap:
                machine.machine_id = self.juju_m_idmap[machine.instance_id]
                log.debug("machine instance_id {} already exists as #{}, "
                          "skipping".format(machine.instance_id,
                                            machine.machine_id))
                continue
            log.debug("adding machine with "
                      "constraints={}".format(machine.constraints))
            rv = self.juju.add_machine(constraints=machine.constraints)
            m_id = get_created_machine_id(machine.instance_id, rv)
            machine.machine_id = m_id
            rv = self.juju.set_annotations(m_id, 'machine',
                                           {'instance_id':
                                            machine.instance_id})
            self.juju_m_idmap[machine.instance_id] = m_id

    def run_apt_go_fast(self, machine_id):
        utils.remote_cp(machine_id,
                        src=path.join(self.config.share_path,
                                      "tools/apt-go-fast"),
                        dst="/tmp/apt-go-fast",
                        juju_home=self.config.juju_home(use_expansion=True))
        utils.remote_run(machine_id,
                         cmds="sudo sh /tmp/apt-go-fast",
                         juju_home=self.config.juju_home(use_expansion=True))
configure_lxc_network(self, machine_id): # upload our lxc-host-only template and setup bridge log.info('Copying network specifications to machine') srcpath = path.join(self.config.tmpl_path, 'lxc-host-only') destpath = "/tmp/lxc-host-only" utils.remote_cp(machine_id, src=srcpath, dst=destpath, juju_home=self.config.juju_home(use_expansion=True)) log.debug('Updating network configuration for machine') utils.remote_run(machine_id, cmds="sudo chmod +x /tmp/lxc-host-only", juju_home=self.config.juju_home(use_expansion=True)) utils.remote_run(machine_id, cmds="sudo /tmp/lxc-host-only", juju_home=self.config.juju_home(use_expansion=True)) def deploy_using_placement(self): """Deploy charms using machine placement from placement controller, waiting for any deferred charms. Then enqueue all charms for further processing and return. """ self.ui.status_info_message("Verifying service deployments") assigned_ccs = self.placement_controller.assigned_charm_classes() charm_classes = sorted(assigned_ccs, key=attrgetter('deploy_priority')) def undeployed_charm_classes(): return [c for c in charm_classes if c not in self.deployed_charm_classes] def update_pending_display(): pending_names = [c.display_name for c in undeployed_charm_classes()] self.ui.set_pending_deploys(pending_names) while len(undeployed_charm_classes()) > 0: update_pending_display() for charm_class in undeployed_charm_classes(): self.ui.status_info_message( "Checking if {c} is deployed".format( c=charm_class.display_name)) service_names = [s.service_name for s in self.juju_state.services] if charm_class.charm_name in service_names: self.ui.status_info_message( "{c} is already deployed, skipping".format( c=charm_class.display_name)) self.deployed_charm_classes.append(charm_class) continue err = self.try_deploy(charm_class) name = charm_class.display_name if err: log.debug( "{} is waiting for another service, will" " re-try in a few seconds".format(name)) break else: log.debug("Issued deploy for {}".format(name)) self.deployed_charm_classes.append(charm_class) self.juju_state.invalidate_status_cache() update_pending_display() num_remaining = len(undeployed_charm_classes()) if num_remaining > 0: log.debug("{} charms pending deploy.".format(num_remaining)) log.debug("deployed_charm_classes={}".format( PrettyLog(self.deployed_charm_classes))) async.sleep_until(5) update_pending_display() def try_deploy(self, charm_class): "returns True if deploy is deferred and should be tried again." 
charm = charm_class(juju=self.juju, juju_state=self.juju_state, ui=self.ui, config=self.config) asts = self.placement_controller.get_assignments(charm_class) errs = [] first_deploy = True for atype, ml in asts.items(): for machine in ml: mspec = self.get_machine_spec(machine, atype) if mspec is None: errs.append(machine) continue if first_deploy: msg = "Deploying {c}".format(c=charm_class.display_name) if mspec != '': msg += " to machine {mspec}".format(mspec=mspec) self.ui.status_info_message(msg) deploy_err = charm.deploy(mspec) if deploy_err: errs.append(machine) else: first_deploy = False else: # service already deployed, need to add-unit msg = ("Adding one unit of " "{c}".format(c=charm_class.display_name)) if mspec != '': msg += " to machine {mspec}".format(mspec=mspec) self.ui.status_info_message(msg) deploy_err = charm.add_unit(machine_spec=mspec) if deploy_err: errs.append(machine) if not deploy_err: self.placement_controller.mark_deployed(machine, charm_class, atype) had_err = len(errs) > 0 if had_err and not self.config.getopt('headless'): log.warning("deferred deploying to these machines: {}".format( errs)) return had_err def get_machine_spec(self, maas_machine, atype): """Given a machine and assignment type, return a juju machine spec. Returns None on errors, and '' for the subordinate char placeholder. """ if self.placement_controller.is_placeholder(maas_machine.instance_id): # placeholder machines do not use a machine spec return "" jm = next((m for m in self.juju_state.machines() if (m.instance_id == maas_machine.instance_id or m.machine_id == maas_machine.machine_id)), None) if jm is None: log.error("could not find juju machine matching {}" " (instance id {})".format(maas_machine, maas_machine.instance_id)) return None if atype == AssignmentType.BareMetal \ or atype == AssignmentType.DEFAULT: return jm.machine_id elif atype == AssignmentType.LXC: return "lxc:{}".format(jm.machine_id) elif atype == AssignmentType.KVM: return "kvm:{}".format(jm.machine_id) else: log.error("unexpected atype: {}".format(atype)) return None def wait_for_deployed_services_ready(self): """ Blocks until all deployed services attached units are in a 'started' state """ if not self.juju_state: return self.ui.status_info_message( "Waiting for deployed services to be in a ready state.") not_ready_len = 0 while not self.juju_state.all_agents_started(): not_ready = [(a, b) for a, b in self.juju_state.get_agent_states() if b != 'started'] if len(not_ready) == not_ready_len: async.sleep_until(3) continue not_ready_len = len(not_ready) log.info("Checking availability of {} ".format( ", ".join(["{}:{}".format(a, b) for a, b in not_ready]))) async.sleep_until(3) self.config.setopt('deploy_complete', True) self.ui.status_info_message( "Processing relations and finalizing services") def enqueue_deployed_charms(self): """Send all deployed charms to CharmQueue for relation setting and post-proc. """ charm_q = CharmQueue(ui=self.ui, config=self.config, juju=self.juju, juju_state=self.juju_state, deployed_charms=self.deployed_charm_classes) if self.config.getopt('headless'): charm_q.watch_relations() charm_q.watch_post_proc() else: async.submit(charm_q.watch_relations, self.ui.show_exception_message) async.submit(charm_q.watch_post_proc, self.ui.show_exception_message) charm_q.is_running = True # Exit cleanly if we've finished all deploys, relations, # post processing, and running in headless mode. 
        if self.config.getopt('headless'):
            while not self.config.getopt('postproc_complete'):
                self.ui.status_info_message(
                    "Waiting for services to be started.")
                # FIXME: Is this needed?
                # time.sleep(10)
            self.ui.status_info_message(
                "All services deployed, relations set, and started")
            self.loop.exit(0)

        session_id = self.config.getopt('session_id')
        if self.config.is_single():
            utils.pollinate(session_id, 'DS')
        elif self.config.is_multi():
            utils.pollinate(session_id, 'DM')

        if charm_q.charm_post_proc_q.empty():
            self.ui.status_info_message("Ready.")

        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

    def deploy_new_services(self):
        """Deploys newly added services in background thread.
        Does not attempt to create new machines.
        """
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

        self.deploy_using_placement()
        self.wait_for_deployed_services_ready()
        self.enqueue_deployed_charms()

    def cancel_add_services(self):
        """User cancelled add-services screen.
        Just redisplay services view.
        """
        self.config.setopt('current_state', ControllerState.SERVICES.value)
        self.ui.render_services_view(self.nodes, self.juju_state,
                                     self.maas_state, self.config)
        self.loop.redraw_screen()

    def start(self):
        """ Starts UI loop
        """
        if self.config.getopt('headless'):
            self.initialize()
        else:
            self.ui.status_info_message("Welcome")
            rel = self.config.getopt('openstack_release')
            label = OPENSTACK_RELEASE_LABELS[rel]
            self.ui.set_openstack_rel(label)
            self.initialize()
            self.loop.register_callback('refresh_display', self.update)
            AlarmMonitor.add_alarm(self.loop.set_alarm_in(0, self.update),
                                   "controller-start")
            self.config.setopt("gui_started", True)
            self.loop.run()
        self.loop.close()
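# Hedged usage sketch (not installer code): how the Controller above can be
# driven with mocked collaborators. MagicMock stands in for the real ui,
# config and urwid loop objects, and the helper name _update_dispatch_sketch
# is made up for this illustration; only the call pattern of update() is
# meaningful here. It also assumes AlarmMonitor.add_alarm simply records the
# alarm handle it is given.
from unittest.mock import MagicMock


def _update_dispatch_sketch():
    ui, config, loop = MagicMock(), MagicMock(), MagicMock()
    ctrl = Controller(ui=ui, config=config, loop=loop)
    # Make getopt('current_state') return the PLACEMENT state so update()
    # takes its placement branch; with a MagicMock config the comparison in
    # update() sees exactly this value.
    config.getopt.return_value = ControllerState.PLACEMENT
    ctrl.update()
    # update() should have rendered the placement view and scheduled the
    # next refresh via loop.set_alarm_in(interval, self.update).
    ui.render_placement_view.assert_called_once_with(
        loop, config, ctrl.commit_placement)
    loop.set_alarm_in.assert_called_once_with(1, ctrl.update)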
class PlacementControllerTestCase(unittest.TestCase): def setUp(self): self.mock_maas_state = MagicMock() with NamedTemporaryFile(mode='w+', encoding='utf-8') as tempf: utils.spew(tempf.name, yaml.dump(dict())) self.conf = Config({}, tempf.name) self.conf.setopt('storage_backend', 'none') self.pc = PlacementController(self.mock_maas_state, self.conf) self.mock_machine = MagicMock(name='machine1') pmid = PropertyMock(return_value='fake-instance-id-1') type(self.mock_machine).instance_id = pmid self.mock_machine_2 = MagicMock(name='machine2') pmid2 = PropertyMock(return_value='fake-instance-id-2') type(self.mock_machine_2).instance_id = pmid2 self.mock_machines = [self.mock_machine, self.mock_machine_2] self.mock_maas_state.machines.return_value = self.mock_machines def test_get_assignments_atype(self): self.assertEqual(0, len(self.pc.get_assignments(CharmNovaCompute))) self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) md = self.pc.get_assignments(CharmNovaCompute) self.assertEqual(1, len(md)) self.assertEqual(2, len(md[AssignmentType.LXC])) def _do_test_simple_assign_type(self, assignment_type): self.pc.assign(self.mock_machine, CharmNovaCompute, assignment_type) print("assignments is {}".format(self.pc.assignments)) machines = self.pc.get_assignments(CharmNovaCompute) print('machines for charm is {}'.format(machines)) self.assertEqual(machines, {assignment_type: [self.mock_machine]}) ma = self.pc.assignments_for_machine(self.mock_machine) self.assertEqual(ma[assignment_type], [CharmNovaCompute]) def test_simple_assign_bare(self): self._do_test_simple_assign_type(AssignmentType.BareMetal) def test_simple_assign_lxc(self): self._do_test_simple_assign_type(AssignmentType.LXC) def test_simple_assign_kvm(self): self._do_test_simple_assign_type(AssignmentType.KVM) def test_assign_nonmulti(self): self.pc.assign(self.mock_machine, CharmKeystone, AssignmentType.LXC) self.assertEqual(self.pc.get_assignments(CharmKeystone), {AssignmentType.LXC: [self.mock_machine]}) self.pc.assign(self.mock_machine, CharmKeystone, AssignmentType.KVM) self.assertEqual(self.pc.get_assignments(CharmKeystone), {AssignmentType.KVM: [self.mock_machine]}) am = self.pc.assignments_for_machine(self.mock_machine) self.assertEqual(am[AssignmentType.KVM], [CharmKeystone]) self.assertEqual(am[AssignmentType.LXC], []) def test_assign_multi(self): self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.assertEqual(self.pc.get_assignments(CharmNovaCompute), {AssignmentType.LXC: [self.mock_machine]}) self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.KVM) self.assertEqual( self.pc.get_assignments(CharmNovaCompute), { AssignmentType.LXC: [self.mock_machine], AssignmentType.KVM: [self.mock_machine] }) ma = self.pc.assignments_for_machine(self.mock_machine) self.assertEqual(ma[AssignmentType.LXC], [CharmNovaCompute]) self.assertEqual(ma[AssignmentType.KVM], [CharmNovaCompute]) def test_remove_assignment_multi(self): self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.assign(self.mock_machine_2, CharmNovaCompute, AssignmentType.LXC) mfc = self.pc.get_assignments(CharmNovaCompute) mfc_lxc = set(mfc[AssignmentType.LXC]) self.assertEqual(mfc_lxc, set(self.mock_machines)) self.pc.clear_assignments(self.mock_machine) self.assertEqual(self.pc.get_assignments(CharmNovaCompute), {AssignmentType.LXC: [self.mock_machine_2]}) def test_gen_defaults(self): satisfies_importstring = 
'cloudinstall.placement.controller.satisfies' with patch(satisfies_importstring) as mock_satisfies: mock_satisfies.return_value = (True, ) defs = self.pc.gen_defaults( charm_classes=[CharmNovaCompute, CharmKeystone], maas_machines=[self.mock_machine, self.mock_machine_2]) m1_as = defs[self.mock_machine.instance_id] m2_as = defs[self.mock_machine_2.instance_id] self.assertEqual(m1_as[AssignmentType.BareMetal], [CharmNovaCompute]) self.assertEqual(m1_as[AssignmentType.LXC], []) self.assertEqual(m1_as[AssignmentType.KVM], []) self.assertEqual(m2_as[AssignmentType.BareMetal], []) self.assertEqual(m2_as[AssignmentType.LXC], [CharmKeystone]) self.assertEqual(m2_as[AssignmentType.KVM], []) def test_remove_one_assignment_sametype(self): self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute) md = self.pc.assignments[self.mock_machine.instance_id] lxcs = md[AssignmentType.LXC] self.assertEqual(lxcs, [CharmNovaCompute]) self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute) md = self.pc.assignments[self.mock_machine.instance_id] lxcs = md[AssignmentType.LXC] self.assertEqual(lxcs, []) def test_remove_one_assignment_othertype(self): self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.KVM) self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute) md = self.pc.assignments[self.mock_machine.instance_id] lxcs = md[AssignmentType.LXC] kvms = md[AssignmentType.KVM] self.assertEqual(1, len(lxcs) + len(kvms)) self.pc.remove_one_assignment(self.mock_machine, CharmNovaCompute) md = self.pc.assignments[self.mock_machine.instance_id] lxcs = md[AssignmentType.LXC] kvms = md[AssignmentType.KVM] self.assertEqual(0, len(lxcs) + len(kvms)) def test_clear_all(self): self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.assign(self.mock_machine_2, CharmNovaCompute, AssignmentType.KVM) self.pc.clear_all_assignments() # check that it's empty: self.assertEqual(self.pc.assignments, {}) # and that it's still a defaultdict(lambda: defaultdict(list)) mid = self.mock_machine.machine_id lxcs = self.pc.assignments[mid][AssignmentType.LXC] self.assertEqual(lxcs, []) def test_unassigned_starts_full(self): self.assertEqual(len(self.pc.unassigned_undeployed_services()), len(self.pc.charm_classes())) def test_assigned_charm_classes_starts_empty(self): self.assertEqual(0, len(self.pc.assigned_charm_classes())) def test_reset_unassigned_undeployed_none(self): """Assign all charms, ensure that unassigned is empty""" for cc in self.pc.charm_classes(): self.pc.assign(self.mock_machine, cc, AssignmentType.LXC) self.pc.reset_assigned_deployed() self.assertEqual(0, len(self.pc.unassigned_undeployed_services())) def test_reset_unassigned_undeployed_two(self): self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.assign(self.mock_machine_2, CharmKeystone, AssignmentType.KVM) self.pc.reset_assigned_deployed() self.assertEqual( len(self.pc.charm_classes()) - 2, len(self.pc.unassigned_undeployed_services())) def test_reset_excepting_compute(self): for cc in self.pc.charm_classes(): if cc.charm_name == 'nova-compute': continue self.pc.assign(self.mock_machine, cc, AssignmentType.LXC) self.pc.reset_assigned_deployed() self.assertEqual(len(self.pc.unassigned_undeployed_services()), 1) def test_unassigned_undeployed(self): all_charms = 
set(self.pc.charm_classes()) self.pc.assign(self.mock_machine, CharmKeystone, AssignmentType.KVM) self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.KVM) self.pc.mark_deployed(self.mock_machine, CharmKeystone, AssignmentType.KVM) self.assertTrue( CharmKeystone not in self.pc.unassigned_undeployed_services()) self.assertTrue( CharmNovaCompute not in self.pc.unassigned_undeployed_services()) self.assertTrue(self.pc.is_deployed(CharmKeystone)) self.assertTrue(self.pc.is_assigned(CharmNovaCompute)) self.assertTrue( len(all_charms) - 2, len(self.pc.unassigned_undeployed_services())) n_k_as = self.pc.assignment_machine_count_for_charm(CharmKeystone) self.assertEqual(n_k_as, 0) n_k_dl = self.pc.deployment_machine_count_for_charm(CharmKeystone) self.assertEqual(n_k_dl, 1) n_nc_as = self.pc.assignment_machine_count_for_charm(CharmNovaCompute) self.assertEqual(n_nc_as, 1) n_nc_dl = self.pc.deployment_machine_count_for_charm(CharmNovaCompute) self.assertEqual(n_nc_dl, 0) def test_deployed_charms_starts_empty(self): "Initially there are no deployed charms" self.assertEqual(0, len(self.pc.deployed_charm_classes())) def test_mark_deployed_unsets_assignment(self): "Setting a placement to deployed removes it from assignment dict" self.pc.assign(self.mock_machine, CharmKeystone, AssignmentType.KVM) self.assertEqual([CharmKeystone], self.pc.assigned_charm_classes()) self.pc.mark_deployed(self.mock_machine, CharmKeystone, AssignmentType.KVM) self.assertEqual([CharmKeystone], self.pc.deployed_charm_classes()) self.assertEqual([], self.pc.assigned_charm_classes()) def test_set_deployed_unsets_assignment_only_once(self): "Setting a placement to deployed removes it from assignment dict" self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.KVM) self.pc.assign(self.mock_machine_2, CharmNovaCompute, AssignmentType.KVM) self.assertEqual([CharmNovaCompute], self.pc.assigned_charm_classes()) ad = self.pc.get_assignments(CharmNovaCompute) dd = self.pc.get_deployments(CharmNovaCompute) from pprint import pformat print("Assignments is {}".format(pformat(ad))) print("Deployments is {}".format(pformat(dd))) self.assertEqual(set([self.mock_machine, self.mock_machine_2]), set(ad[AssignmentType.KVM])) self.assertEqual(len(dd.items()), 0) self.pc.mark_deployed(self.mock_machine, CharmNovaCompute, AssignmentType.KVM) self.assertEqual([CharmNovaCompute], self.pc.deployed_charm_classes()) self.assertEqual([CharmNovaCompute], self.pc.assigned_charm_classes()) ad = self.pc.get_assignments(CharmNovaCompute) dd = self.pc.get_deployments(CharmNovaCompute) self.assertEqual([self.mock_machine_2], ad[AssignmentType.KVM]) self.assertEqual([self.mock_machine], dd[AssignmentType.KVM]) def test_get_charm_state(self): "Test a sampling of required services and special handling for compute" self.assertEqual( self.pc.get_charm_state(CharmKeystone)[0], CharmState.REQUIRED) self.assertEqual( self.pc.get_charm_state(CharmNovaCompute)[0], CharmState.REQUIRED) def test_one_compute_required(self): """after being assigned at least once, novacompute is no longer considered 'required' (aka required)""" self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.assertNotEqual( self.pc.get_charm_state(CharmNovaCompute)[0], CharmState.REQUIRED) def test_swift_unrequired_then_required_default(self): "Swift and swift-proxy are both optional until you add swift" self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.OPTIONAL, 
self.pc.get_charm_state(CharmSwiftProxy)[0]) self.pc.assign(self.mock_machine, CharmSwift, AssignmentType.LXC) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwiftProxy)[0]) def test_swift_unrequired_then_required_swift_backend(self): "Swift and swift-proxy are not optional with swift as the backend." self.conf.setopt('storage_backend', 'swift') self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwiftProxy)[0]) self.pc.assign(self.mock_machine, CharmSwift, AssignmentType.LXC) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwiftProxy)[0]) def test_swift_proxy_unrequired_then_required_default(self): "Swift and swift-proxy are both optional until you add swift-proxy" self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwiftProxy)[0]) self.pc.assign(self.mock_machine, CharmSwiftProxy, AssignmentType.LXC) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwift)[0]) # Only one swift-proxy is required, so now that we've added # it, it is still not required: self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwiftProxy)[0]) def test_swift_proxy_unrequired_then_required_swift_backend(self): "Swift and swift-proxy are not optional with swift as the backend" self.conf.setopt('storage_backend', 'swift') self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwiftProxy)[0]) self.pc.assign(self.mock_machine, CharmSwiftProxy, AssignmentType.LXC) self.assertEqual(CharmState.REQUIRED, self.pc.get_charm_state(CharmSwift)[0]) # Only one swift-proxy is required, so now that we've added # it, it is still not required: self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwiftProxy)[0]) def test_storage_backends_in_is_required(self): # default is 'none' self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmCeph)[0]) self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmCephOSD)[0]) self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwiftProxy)[0]) self.conf.setopt('storage_backend', 'swift') self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmCeph)[0]) self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmCephOSD)[0]) self.assertEqual(CharmState.CONFLICTED, self.pc.get_charm_state(CharmCephRadosGw)[0]) st = self.pc.get_charm_state(CharmSwift) swift_state, swift_cons, swift_deps = st self.assertEqual(CharmState.REQUIRED, swift_state) st = self.pc.get_charm_state(CharmSwiftProxy) swp_state, swp_cons, swp_deps = st self.assertEqual(CharmState.REQUIRED, swp_state) self.assertEqual([], swp_cons) ceph_state, ceph_cons, ceph_deps = self.pc.get_charm_state(CharmCeph) self.assertEqual(CharmState.OPTIONAL, ceph_state) st = self.pc.get_charm_state(CharmCephRadosGw) ceph_rg_state, ceph_rg_cons, ceph_rg_deps = st self.assertEqual(CharmState.CONFLICTED, ceph_rg_state) self.conf.setopt('storage_backend', 'ceph') ceph_state, ceph_cons, ceph_deps = self.pc.get_charm_state(CharmCeph) self.assertEqual(CharmState.REQUIRED, ceph_state) self.assertEqual(CharmState.OPTIONAL, 
self.pc.get_charm_state(CharmCephOSD)[0]) self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwift)[0]) self.assertEqual(CharmState.OPTIONAL, self.pc.get_charm_state(CharmSwiftProxy)[0]) def test_ceph_num_required(self): "3 units of ceph should be required after having been assigned" state, cons, deps = self.pc.get_charm_state(CharmCeph) self.assertEqual(state, CharmState.OPTIONAL) self.pc.assign(self.mock_machine, CharmCeph, AssignmentType.KVM) self.assertEqual( self.pc.get_charm_state(CharmCeph)[0], CharmState.REQUIRED) self.pc.assign(self.mock_machine, CharmCeph, AssignmentType.KVM) self.pc.assign(self.mock_machine, CharmCeph, AssignmentType.KVM) self.assertEqual( self.pc.get_charm_state(CharmCeph)[0], CharmState.OPTIONAL) def test_persistence(self): self.pc.assign(self.mock_machine, CharmNovaCompute, AssignmentType.LXC) self.pc.assign(self.mock_machine_2, CharmKeystone, AssignmentType.KVM) cons1 = PropertyMock(return_value={}) type(self.mock_machine).constraints = cons1 cons2 = PropertyMock(return_value={'cpu': 8}) type(self.mock_machine_2).constraints = cons2 with TemporaryFile(mode='w+', encoding='utf-8') as tempf: self.pc.save(tempf) tempf.seek(0) print(tempf.read()) tempf.seek(0) newpc = PlacementController(self.mock_maas_state, self.conf) newpc.load(tempf) self.assertEqual(self.pc.assignments, newpc.assignments) self.assertEqual(self.pc.machines_pending(), newpc.machines_pending()) self.assertEqual(self.pc.assigned_charm_classes(), newpc.assigned_charm_classes()) m2 = next((m for m in newpc.machines_pending() if m.instance_id == 'fake-instance-id-2')) self.assertEqual(m2.constraints, {'cpu': 8}) def test_load_machines_single(self): with NamedTemporaryFile(mode='w+', encoding='utf-8') as tempf: utils.spew(tempf.name, yaml.dump(dict())) conf = Config({}, tempf.name) fake_assignments = { 'fake_iid': { 'constraints': {}, 'assignments': { 'KVM': ['nova-compute'] } }, 'fake_iid_2': { 'constraints': { 'cpu': 8 }, 'assignments': { 'BareMetal': ['nova-compute'] } } } singlepc = PlacementController(None, conf) with TemporaryFile(mode='w+', encoding='utf-8') as tempf: yaml.dump(fake_assignments, tempf) tempf.seek(0) singlepc.load(tempf) self.assertEqual( set([m.instance_id for m in singlepc.machines_pending()]), set(['fake_iid', 'fake_iid_2'])) m2 = next((m for m in singlepc.machines_pending() if m.instance_id == 'fake_iid_2')) self.assertEqual(m2.constraints, {'cpu': 8}) def test_load_error_mismatch_charm_name(self): """Should safely ignore (and log) a charm name in a placement file that can't be matched to a loaded charm class.""" singlepc = PlacementController(None, self.conf) fake_assignments = { 'fake_iid': { 'constraints': {}, 'assignments': { 'KVM': ['non-existent'] } }, 'fake_iid_2': { 'constraints': { 'cpu': 8 }, 'assignments': { 'BareMetal': ['nova-compute'] } } } with TemporaryFile(mode='w+', encoding='utf-8') as tempf: yaml.dump(fake_assignments, tempf) tempf.seek(0) singlepc.load(tempf) self.assertEqual( set([m.instance_id for m in singlepc.machines_pending()]), set(['fake_iid_2'])) m2 = next((m for m in singlepc.machines_pending() if m.instance_id == 'fake_iid_2')) self.assertEqual(m2.constraints, {'cpu': 8}) def test_is_assigned_to_is_deployed_to(self): self.assertFalse( self.pc.is_assigned_to(CharmSwiftProxy, self.mock_machine)) self.assertFalse( self.pc.is_deployed_to(CharmSwiftProxy, self.mock_machine)) self.pc.assign(self.mock_machine, CharmSwiftProxy, AssignmentType.LXC) self.assertFalse( self.pc.is_deployed_to(CharmSwiftProxy, self.mock_machine)) 
        self.assertTrue(
            self.pc.is_assigned_to(CharmSwiftProxy, self.mock_machine))

        self.pc.mark_deployed(self.mock_machine, CharmSwiftProxy,
                              AssignmentType.LXC)
        self.assertTrue(
            self.pc.is_deployed_to(CharmSwiftProxy, self.mock_machine))
        self.assertFalse(
            self.pc.is_assigned_to(CharmSwiftProxy, self.mock_machine))

    def test_double_clear_ok(self):
        """clearing assignments for a machine that isn't assigned (anymore)
        is OK and should do nothing
        """
        self.pc.assign(self.mock_machine, CharmSwiftProxy, AssignmentType.LXC)
        self.pc.clear_assignments(self.mock_machine)
        self.pc.clear_assignments(self.mock_machine)
        self.pc.clear_assignments(self.mock_machine_2)

    def test_gen_defaults_raises_with_no_maas_state(self):
        pc = PlacementController(None, self.conf)
        self.assertRaises(PlacementError, pc.gen_defaults)

    def test_gen_defaults_uses_only_ready(self):
        """gen_defaults should only use ready machines"""
        mock_maas_state = MagicMock()
        mock_maas_state.machines.return_value = []
        c = Config()
        c.setopt('storage_backend', 'none')
        pc = PlacementController(config=c, maas_state=mock_maas_state)
        # reset the mock to avoid looking at calls from
        # PlacementController.__init__().
        mock_maas_state.reset_mock()
        pc.gen_defaults()
        # we simply check the first call because we know that
        # follow-on calls are from calls to get_assignments and do
        # not affect machines used for defaults
        self.assertEqual(mock_maas_state.machines.mock_calls[0],
                         call(MaasMachineStatus.READY))

    def test_gen_single_backends(self):
        "gen_single places storage charms according to the storage_backend"
        def find_charm(cn, defs):
            allcharms = []
            for mname, ad in defs.items():
                for atype, charmclasses in ad.items():
                    allcharms += charmclasses
            return cn in allcharms

        c = Config()
        pc = PlacementController(config=c)

        # default storage_backend is 'none'
        c.setopt('storage_backend', 'none')
        defaults = pc.gen_single()
        self.assertFalse(find_charm(CharmSwiftProxy, defaults))
        self.assertFalse(find_charm(CharmSwift, defaults))
        self.assertFalse(find_charm(CharmCeph, defaults))
        self.assertFalse(find_charm(CharmCephOSD, defaults))

        c.setopt('storage_backend', 'swift')
        defaults = pc.gen_single()
        self.assertTrue(find_charm(CharmSwiftProxy, defaults))
        self.assertTrue(find_charm(CharmSwift, defaults))
        self.assertFalse(find_charm(CharmCeph, defaults))
        self.assertFalse(find_charm(CharmCephOSD, defaults))

        c.setopt('storage_backend', 'ceph')
        defaults = pc.gen_single()
        self.assertFalse(find_charm(CharmSwiftProxy, defaults))
        self.assertFalse(find_charm(CharmSwift, defaults))
        self.assertTrue(find_charm(CharmCeph, defaults))
        self.assertFalse(find_charm(CharmCephOSD, defaults))
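# Hedged sketch (not part of the test suite): the same storage-backend matrix
# that test_gen_single_backends walks, written as one table-driven loop. The
# helper name _check_backend_matrix and the `expectations` table are made up
# for this illustration; it relies only on Config, PlacementController and
# gen_single() exactly as used in the tests above.
def _check_backend_matrix():
    expectations = {
        'none':  {CharmSwiftProxy: False, CharmSwift: False,
                  CharmCeph: False, CharmCephOSD: False},
        'swift': {CharmSwiftProxy: True, CharmSwift: True,
                  CharmCeph: False, CharmCephOSD: False},
        'ceph':  {CharmSwiftProxy: False, CharmSwift: False,
                  CharmCeph: True, CharmCephOSD: False},
    }
    c = Config()
    pc = PlacementController(config=c)
    for backend, charms in expectations.items():
        c.setopt('storage_backend', backend)
        # flatten machine -> assignment-type -> charm-class lists, mirroring
        # the nesting find_charm() iterates over
        placed = [cc for ad in pc.gen_single().values()
                  for ccs in ad.values() for cc in ccs]
        for charm, expected in charms.items():
            assert (charm in placed) == expected, (backend, charm)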