def filter_hosts(self):
    """
    Exclude any non-RPM-based hosts, and any downburst VMs

    Narrows self.cluster to only those remotes where SELinux can run:
    VMs, containers, and openSUSE/SLE hosts are skipped, and of the
    rest only rpm-based hosts are kept.
    """
    # Let the base class apply any config-driven host filtering first.
    super(SELinux, self).filter_hosts()
    new_cluster = Cluster()
    for (remote, roles) in self.cluster.remotes.items():
        if remote.is_vm:
            msg = "Excluding {host}: VMs are not yet supported"
            log.info(msg.format(host=remote.shortname))
        elif remote.is_container:
            msg = "Excluding {host}: containers are not yet supported"
            log.info(msg.format(host=remote.shortname))
        elif remote.os.name in ['opensuse', 'sle']:
            # NOTE: backslash continuation inside the string literal — the
            # message is emitted as a single line with no embedded newline.
            msg = "Excluding {host}: \
SELinux is not supported for '{os}' os_type yet"
            log.info(msg.format(host=remote.shortname, os=remote.os.name))
        elif remote.os.package_type == 'rpm':
            # Only rpm-based hosts survive the filter.
            new_cluster.add(remote, roles)
        else:
            msg = "Excluding {host}: OS '{os}' does not support SELinux"
            log.debug(msg.format(host=remote.shortname, os=remote.os.name))
    # Replace the task's cluster with the filtered one.
    self.cluster = new_cluster
    return self.cluster
def filter_hosts(self):
    """
    Look for a 'hosts' list in self.config. Each item in the list may
    either be a role or a hostname. Builds a new Cluster object containing
    only those hosts which match one (or more) of the roles or hostnames
    specified. The filtered Cluster object is stored as self.cluster so
    that the task may only run against those hosts.

    :returns: the filtered Cluster, or None if the context has no cluster.
    :raises RuntimeError: if the 'hosts' filter excludes every host.
    """
    if not hasattr(self.ctx, 'cluster'):
        return
    elif 'hosts' not in self.config:
        # No filter requested; run against every host in the context.
        self.cluster = self.ctx.cluster
        return self.cluster
    host_specs = self.config.get('hosts', list())
    cluster = Cluster()
    for host_spec in host_specs:
        # First try to interpret the spec as a role.
        role_matches = self.ctx.cluster.only(host_spec)
        if len(role_matches.remotes) > 0:
            # py3 fix: dict.iteritems() no longer exists; use items().
            for (remote, roles) in role_matches.remotes.items():
                cluster.add(remote, roles)
        elif isinstance(host_spec, str):
            # py3 fix: basestring is gone; str covers all text specs.
            # Fall back to matching by hostname or shortname.
            for (remote, roles) in self.ctx.cluster.remotes.items():
                if remote.name.split('@')[-1] == host_spec or \
                        remote.shortname == host_spec:
                    cluster.add(remote, roles)
    if not cluster.remotes:
        raise RuntimeError("All target hosts were excluded!")
    self.cluster = cluster
    hostnames = [h.shortname for h in self.cluster.remotes]
    self.log.debug("Restricting task {name} to hosts: {hosts}".format(
        name=self.name, hosts=' '.join(hostnames))
    )
    return self.cluster
def setup(self):
    """Build a fresh fake context with two remotes before each test."""
    self.ctx = FakeNamespace()
    cluster = Cluster()
    for hostname, roles in (('user@remote1', ['role1']),
                            ('user@remote2', ['role2'])):
        cluster.add(Remote(hostname), roles)
    self.ctx.cluster = cluster
    self.ctx.config = dict()
    self.task_config = dict(playbook=[])
def filter_hosts(self):
    """
    Exclude any non-RPM-based hosts, and any downburst VMs

    Narrows self.cluster to rpm-based hosts whose shortname does not
    identify a downburst VM, and returns the filtered cluster.
    """
    # Let the base class apply any config-driven host filtering first.
    super(SELinux, self).filter_hosts()
    new_cluster = Cluster()
    # py3 fix: dict.iteritems() no longer exists; use items().
    for (remote, roles) in self.cluster.remotes.items():
        if remote.shortname.startswith('vpm'):
            msg = "Excluding {host}: downburst VMs are not yet supported"
            log.info(msg.format(host=remote.shortname))
        elif remote.os.package_type == 'rpm':
            new_cluster.add(remote, roles)
        else:
            msg = "Excluding {host}: OS '{os}' does not support SELinux"
            log.debug(msg.format(host=remote.shortname, os=remote.os.name))
    self.cluster = new_cluster
    return self.cluster
def filter_hosts(self):
    """
    Exclude any non-RPM-based hosts, and any downburst VMs

    Narrows self.cluster to rpm-based, non-VM hosts and returns the
    filtered cluster.
    """
    # Let the base class apply any config-driven host filtering first.
    super(SELinux, self).filter_hosts()
    new_cluster = Cluster()
    # py3 fix: dict.iteritems() no longer exists; use items().
    for (remote, roles) in self.cluster.remotes.items():
        if remote.is_vm:
            msg = "Excluding {host}: VMs are not yet supported"
            log.info(msg.format(host=remote.shortname))
        elif remote.os.package_type == 'rpm':
            new_cluster.add(remote, roles)
        else:
            msg = "Excluding {host}: OS '{os}' does not support SELinux"
            log.debug(msg.format(host=remote.shortname, os=remote.os.name))
    self.cluster = new_cluster
    return self.cluster
def setup(self):
    """Create a fake three-node ceph cluster context and start patchers."""
    self.ctx = FakeNamespace()
    self.ctx.cluster = Cluster()
    fixture_hosts = (
        ('user@remote1', ['mon.0']),
        ('user@remote2', ['mds.0']),
        ('user@remote3', ['osd.0']),
    )
    for hostname, roles in fixture_hosts:
        self.ctx.cluster.add(Remote(hostname), roles)
    self.ctx.config = dict()
    self.task_config = dict()
    self.start_patchers()
def setup_method(self, method):
    """Build a fresh two-remote fake context and start patchers per test."""
    self.ctx = FakeNamespace()
    cluster = Cluster()
    for hostname, roles in (('user@remote1', ['role1']),
                            ('user@remote2', ['role2'])):
        cluster.add(Remote(hostname), roles)
    self.ctx.cluster = cluster
    self.ctx.config = dict()
    self.ctx.summary = dict()
    self.task_config = dict(playbook=[])
    self.start_patchers()
def filter_hosts(self):
    """
    Keep only hosts whose console supports serial-over-LAN logging.

    Drops remotes whose console lacks spawn_sol_log, or which have
    neither IPMI credentials nor conserver settings; returns the
    filtered cluster (or None if the context has no cluster).
    """
    # Apply any config-driven host filtering from the base class first.
    super(ConsoleLog, self).filter_hosts()
    if not hasattr(self.ctx, 'cluster'):
        return
    new_cluster = Cluster()
    # py3 fix: dict.iteritems() no longer exists; use items().
    for (remote, roles) in self.cluster.remotes.items():
        if not hasattr(remote.console, 'spawn_sol_log'):
            log.debug("%s does not support IPMI; excluding",
                      remote.shortname)
        elif not (remote.console.has_ipmi_credentials or
                  remote.console.has_conserver):
            log.debug("Cannot find IPMI credentials or conserver settings "
                      "for %s; excluding", remote.shortname)
        else:
            new_cluster.add(remote, roles)
    self.cluster = new_cluster
    return self.cluster
def filter_hosts(self):
    """
    Keep only hosts whose console supports serial-over-LAN logging.

    Drops remotes whose console lacks spawn_sol_log, or which have
    neither IPMI credentials nor conserver settings; returns the
    filtered cluster (or None if the context has no cluster).
    """
    # Apply any config-driven host filtering from the base class first.
    super(ConsoleLog, self).filter_hosts()
    if not hasattr(self.ctx, 'cluster'):
        return
    new_cluster = Cluster()
    # py3 fix: dict.iteritems() no longer exists; use items().
    for (remote, roles) in self.cluster.remotes.items():
        if not hasattr(remote.console, 'spawn_sol_log'):
            log.debug("%s does not support IPMI; excluding",
                      remote.shortname)
        elif not (remote.console.has_ipmi_credentials or
                  remote.console.has_conserver):
            log.debug(
                "Cannot find IPMI credentials or conserver settings "
                "for %s; excluding", remote.shortname)
        else:
            new_cluster.add(remote, roles)
    self.cluster = new_cluster
    return self.cluster
def test_hosts_no_results(self):
    """A hosts filter that matches nothing must raise RuntimeError."""
    cluster = Cluster()
    cluster.add(Remote('user@remote1'), ['role1'])
    self.ctx.cluster = cluster
    # Ask for a role that no remote in the cluster carries.
    self.task_config.update(dict(hosts=['role2'], ))
    with patch.multiple(self.klass, begin=DEFAULT):
        with raises(RuntimeError):
            with self.klass(self.ctx, self.task_config):
                pass
def setup(self):
    """Prime global IPMI config and build a two-remote fake context."""
    teuth_config.ipmi_domain = 'ipmi.domain'
    teuth_config.ipmi_user = '******'
    teuth_config.ipmi_password = '******'
    self.ctx = FakeNamespace()
    cluster = Cluster()
    for hostname, roles in (('user@remote1', ['role1']),
                            ('user@remote2', ['role2'])):
        cluster.add(Remote(hostname), roles)
    self.ctx.cluster = cluster
    self.ctx.config = dict()
    self.ctx.archive = '/fake/path'
    self.task_config = dict()
    self.start_patchers()
def filter_hosts(self):
    """
    Look for a 'hosts' list in self.config. Each item in the list may
    either be a role or a hostname. Builds a new Cluster object containing
    only those hosts which match one (or more) of the roles or hostnames
    specified. The filtered Cluster object is stored as self.cluster so
    that the task may only run against those hosts.

    :returns: the filtered Cluster, or None if the context has no cluster.
    :raises RuntimeError: if the 'hosts' filter excludes every host.
    """
    if not hasattr(self.ctx, 'cluster'):
        return
    elif 'hosts' not in self.config:
        # No filter requested; run against every host in the context.
        self.cluster = self.ctx.cluster
        return self.cluster
    host_specs = self.config.get('hosts', list())
    cluster = Cluster()
    for host_spec in host_specs:
        # First try the spec as a role.
        role_matches = self.ctx.cluster.only(host_spec)
        if len(role_matches.remotes) > 0:
            # py3 fix: dict.iteritems() no longer exists; use items().
            for (remote, roles) in role_matches.remotes.items():
                cluster.add(remote, roles)
        elif isinstance(host_spec, str):
            # py3 fix: basestring is gone; str covers all text specs.
            # Fall back to matching by hostname or shortname.
            for (remote, roles) in self.ctx.cluster.remotes.items():
                if remote.name.split('@')[-1] == host_spec or \
                        remote.shortname == host_spec:
                    cluster.add(remote, roles)
    if not cluster.remotes:
        raise RuntimeError("All target hosts were excluded!")
    self.cluster = cluster
    hostnames = [h.shortname for h in self.cluster.remotes]
    self.log.debug("Restricting task {name} to hosts: {hosts}".format(
        name=self.name, hosts=' '.join(hostnames)))
    return self.cluster
def test_hosts_one_role(self):
    """Filtering on a single role keeps exactly the matching remote."""
    self.ctx.cluster = Cluster()
    self.ctx.cluster.add(Remote('user@remote1'), ['role1'])
    self.ctx.cluster.add(Remote('user@remote2'), ['role2'])
    self.task_config.update(dict(hosts=['role1'], ))
    with patch.multiple(
        self.klass,
        begin=DEFAULT,
    ):
        with self.klass(self.ctx, self.task_config) as task:
            # py3 fix: dict.keys() returns a view, which is not
            # subscriptable; materialize it into a list first.
            task_hosts = list(task.cluster.remotes)
            assert len(task_hosts) == 1
            assert task_hosts[0].shortname == 'remote1'
def test_hosts_no_filter(self):
    """With no 'hosts' key in the config, every remote is retained."""
    cluster = Cluster()
    for hostname, roles in (('user@remote1', ['role1']),
                            ('user@remote2', ['role2'])):
        cluster.add(Remote(hostname), roles)
    self.ctx.cluster = cluster
    with patch.multiple(self.klass, begin=DEFAULT):
        with self.klass(self.ctx, self.task_config) as task:
            task_hosts = task.cluster.remotes.keys()
            assert len(task_hosts) == 2
            shortnames = sorted(host.shortname for host in task_hosts)
            assert shortnames == ['remote1', 'remote2']
def test_hosts_two_roles(self):
    """Filtering on two roles keeps exactly the two matching remotes."""
    cluster = Cluster()
    for hostname, roles in (('user@remote1', ['role1']),
                            ('user@remote2', ['role2']),
                            ('user@remote3', ['role3'])):
        cluster.add(Remote(hostname), roles)
    self.ctx.cluster = cluster
    self.task_config.update(dict(hosts=['role1', 'role3'], ))
    with patch.multiple(self.klass, begin=DEFAULT):
        with self.klass(self.ctx, self.task_config) as task:
            task_hosts = task.cluster.remotes.keys()
            assert len(task_hosts) == 2
            hostnames = sorted(host.shortname for host in task_hosts)
            assert hostnames == ['remote1', 'remote3']
def test_hosts_one_role_one_hostname(self):
    """A mixed filter of one role plus one hostname keeps both matches."""
    cluster = Cluster()
    for user_at_host, roles in (('*****@*****.**', ['role1']),
                                ('*****@*****.**', ['role2']),
                                ('*****@*****.**', ['role3'])):
        cluster.add(Remote(user_at_host), roles)
    self.ctx.cluster = cluster
    self.task_config.update(dict(hosts=['role1', 'remote2.example.com'], ))
    with patch.multiple(self.klass, begin=DEFAULT, end=DEFAULT):
        with self.klass(self.ctx, self.task_config) as task:
            task_hosts = list(task.cluster.remotes)
            assert len(task_hosts) == 2
            hostnames = sorted(host.hostname for host in task_hosts)
            assert hostnames == [
                'remote1.example.com',
                'remote2.example.com',
            ]
def test_host_exclusion(self):
    """The SELinux task must drop non-rpm hosts from its cluster."""
    with patch.multiple(
        Remote,
        os=DEFAULT,
        run=DEFAULT,
    ):
        self.ctx.cluster = Cluster()
        remote1 = Remote('remote1')
        remote1.os = Mock()
        remote1.os.package_type = 'rpm'
        self.ctx.cluster.add(remote1, ['role1'])
        # Bug fix: this host was accidentally also named 'remote1';
        # it is meant to be a distinct second host.
        remote2 = Remote('remote2')
        remote2.os = Mock()
        remote2.os.package_type = 'deb'
        self.ctx.cluster.add(remote2, ['role2'])
        task_config = dict()
        with SELinux(self.ctx, task_config) as task:
            # py3 fix: a dict keys() view never compares equal to a
            # list, so materialize it before the assertion.
            remotes = list(task.cluster.remotes)
            assert remotes == [remote1]
def setup(self):
    """Build a two-remote fake context and patch ansible collaborators."""
    self.ctx = FakeNamespace()
    self.ctx.cluster = Cluster()
    self.ctx.cluster.add(Remote('user@remote1'), ['role1'])
    self.ctx.cluster.add(Remote('user@remote2'), ['role2'])
    self.ctx.config = dict()
    self.task_config = dict()
    self.patcher_fetch_repo = patch('teuthology.task.ansible.fetch_repo')
    # Bug fix: return_value must be configured on the mock returned by
    # start(), not on the patcher object itself, where it is silently
    # ignored and the patched fetch_repo would return a plain Mock.
    self.mock_fetch_repo = self.patcher_fetch_repo.start()
    self.mock_fetch_repo.return_value = 'PATH'

    def fake_get_playbook(self):
        # Stand-in for CephLab.get_playbook: installs a fake playbook file.
        self.playbook_file = Mock()
        self.playbook_file.name = 'cephlab.yml'

    self.patcher_get_playbook = patch(
        'teuthology.task.ansible.CephLab.get_playbook',
        new=fake_get_playbook,
    )
    self.patcher_get_playbook.start()
def setup(self):
    """Create a fake cluster with mon/client, osd, and client remotes."""
    self.ctx = FakeNamespace()
    cluster = Cluster()
    fixture = (
        ('remote1', ['mon.a', 'client.0']),
        ('remote2', ['osd.0', 'osd.1', 'osd.2']),
        ('remote3', ['client.1']),
    )
    for hostname, roles in fixture:
        cluster.add(Remote(hostname), roles)
    self.ctx.cluster = cluster