def _check_cluster_status(self, ip, smiles_count, networks_count=1):
    """Run post-deployment sanity checks on a controller node.

    Connects to *ip* over SSH as root and verifies that:
      * `nova-manage service list` exits 0, shows exactly *smiles_count*
        healthy services (":-)") and no dead ones ("XXX");
      * the glance index contains exactly one "TestVM" image;
      * `nova-manage network list` reports *networks_count* networks
        (output has one header line, hence the +1);
      * the test log server is still reachable (self._status_logserver()).

    Returns True only when every check passes; failures are logged
    with the raw command output for debugging.
    """
    logging.info("Checking cluster status: ip=%s smiles=%s networks=%s",
                 ip, smiles_count, networks_count)
    keyfiles = ci.environment.node['admin'].metadata['keyfiles']
    ssh = SSHClient()
    ssh.connect_ssh(ip, 'root', key_filename=keyfiles)

    # Service health: every service smiling, none marked dead.
    result = ssh.execute('/usr/bin/nova-manage service list')
    services_out = ''.join(result['stdout'])
    nova_status = (result['exit_status'] == 0 and
                   services_out.count(":-)") == smiles_count and
                   services_out.count("XXX") == 0)
    if not nova_status:
        logging.warn("Nova check fails:\n%s" % result['stdout'])

    # The default test image must be registered exactly once.
    result = ssh.execute('. /root/openrc; glance index')
    cirros_status = (result['exit_status'] == 0 and
                     ''.join(result['stdout']).count("TestVM") == 1)
    if not cirros_status:
        logging.warn("Cirros check fails:\n%s" % result['stdout'])

    # One header line plus one line per configured network.
    result = ssh.execute('/usr/bin/nova-manage network list')
    nets_status = (result['exit_status'] == 0 and
                   len(result['stdout']) == networks_count + 1)
    if not nets_status:
        logging.warn("Networks check fails:\n%s" % result['stdout'])

    return (nova_status and cirros_status and nets_status and
            self._status_logserver())
def __init__(self, *args, **kwargs):
    """Initialize the test case and open an SSH session to the admin node.

    Sets up:
      * self.remote - SSH connection to the admin (master) node;
      * self.client - HTTP client against the Nailgun API on port 8000;
      * self.ssh_user / self.ssh_passwd - admin node credentials;
      * self.admin_host - cached admin node IP.
    """
    super(TestNode, self).__init__(*args, **kwargs)
    self.remote = SSHClient()
    self.admin_host = self.get_admin_node_ip()
    self.client = HTTPClient(
        url="http://%s:8000" % self.get_admin_node_ip())
    self.ssh_user = "******"
    self.ssh_passwd = "r00tme"
    self.remote.connect_ssh(self.admin_host, self.ssh_user,
                            self.ssh_passwd)
def test_network_config(self):
    """Provision a single controller and verify its network interfaces.

    For every entry in the node's `network_data`, checks over SSH (via
    `ip addr show`) that:
      * the expected interface exists (VLAN sub-interface `dev.vlan@dev`
        when a 'vlan' key is present, otherwise the bare device);
      * it carries the expected IP ('ip' key) - or no IP at all when the
        entry has none;
      * the broadcast address matches when a 'brd' key is present.

    Failures are accumulated into `ifaces_fail` so every interface is
    reported before the final assertion.
    """
    self._revert_nodes()
    self._clean_clusters()
    self._basic_provisioning('network_config', {'controller': ['slave1']})
    slave = ci.environment.node['slave1']
    keyfiles = ci.environment.node['admin'].metadata['keyfiles']
    node = self._get_slave_node_by_devops_node(slave)
    ctrl_ssh = SSHClient()
    ctrl_ssh.connect_ssh(node['ip'], 'root', key_filename=keyfiles)
    ifaces_fail = False
    for iface in node['network_data']:
        try:
            ifname = "%s.%s@%s" % (iface['dev'], iface['vlan'],
                                   iface['dev'])
            ifname_short = "%s.%s" % (iface['dev'], iface['vlan'])
        except KeyError:
            # Untagged interface: no 'vlan' key.
            # BUG FIX: ifname_short was previously left unassigned here,
            # causing a NameError on the first untagged interface (or a
            # stale value leaking in from the previous loop iteration).
            ifname = iface['dev']
            ifname_short = iface['dev']
        iface_data = ''.join(
            ctrl_ssh.execute(
                '/sbin/ip addr show dev %s' % ifname_short)['stdout'])
        if iface_data.find(ifname) == -1:
            logging.error("Interface %s is absent" % ifname_short)
            ifaces_fail = True
        else:
            try:
                if iface_data.find("inet %s" % iface['ip']) == -1:
                    logging.error(
                        "Interface %s does not have ip %s" %
                        (ifname_short, iface['ip']))
                    ifaces_fail = True
            except KeyError:
                # No 'ip' key: the interface must not carry an address.
                if iface_data.find("inet ") != -1:
                    logging.error(
                        "Interface %s does have ip. And it should not" %
                        ifname_short)
                    ifaces_fail = True
            try:
                if iface_data.find("brd %s" % iface['brd']) == -1:
                    logging.error(
                        "Interface %s does not have broadcast %s" %
                        (ifname_short, iface['brd']))
                    ifaces_fail = True
            except KeyError:
                # Broadcast address is optional; nothing to check.
                pass
    self.assertEquals(ifaces_fail, False)
def _basic_provisioning(self, cluster_name, nodes_dict,
                        task_timeout=30 * 60):
    """Create a cluster, deploy the requested nodes and verify role files.

    :param cluster_name: name for the new cluster.
    :param nodes_dict: mapping of role name -> list of devops node names,
        e.g. {'controller': ['slave1'], 'compute': ['slave2']}.
    :param task_timeout: seconds to wait for the provisioning task
        (default 30 minutes).
    :returns: the cluster id.

    After deployment, connects to each slave over SSH and asserts that
    /tmp/<role>-file exists, proving the role's puppet run happened.
    """
    self._start_logserver()
    self._clean_clusters()
    cluster_id = self._create_cluster(name=cluster_name)

    # Here we updating cluster editable attributes
    # In particular we set extra syslog server
    # (pointed at our local log server so its port must be bound already).
    response = self.client.get("/api/clusters/%s/attributes/" % cluster_id)
    attrs = json.loads(response.read())
    attrs["editable"]["syslog"]["syslog_server"]["value"] = \
        self.get_host_node_ip()
    attrs["editable"]["syslog"]["syslog_port"]["value"] = \
        self.logserver.bound_port()
    self.client.put("/api/clusters/%s/attributes/" % cluster_id, attrs)

    # Flatten the role mapping into the full list of node names.
    node_names = []
    for role in nodes_dict:
        node_names += nodes_dict[role]

    # Pick deployment mode from the controller count; a KeyError simply
    # means no 'controller' role was requested, so the default mode stays.
    try:
        if len(node_names) > 1:
            if len(nodes_dict['controller']) == 1:
                self.client.put("/api/clusters/%s/" % cluster_id,
                                {"mode": "multinode"})
            if len(nodes_dict['controller']) > 1:
                self.client.put("/api/clusters/%s/" % cluster_id,
                                {"mode": "ha"})
    except KeyError:
        pass

    nodes = self._bootstrap_nodes(node_names)

    # Assign each bootstrapped node its role and mark it for addition.
    for role in nodes_dict:
        for n in nodes_dict[role]:
            slave = ci.environment.node[n]
            node = self._get_slave_node_by_devops_node(slave)
            self.client.put("/api/nodes/%s/" % node['id'], {
                "role": role,
                "pending_addition": True
            })

    self._update_nodes_in_cluster(cluster_id, nodes)
    task = self._launch_provisioning(cluster_id)
    self._task_wait(task, 'Installation', task_timeout)

    logging.info("Checking role files on slave nodes")
    keyfiles = ci.environment.node['admin'].metadata['keyfiles']
    for role in nodes_dict:
        for n in nodes_dict[role]:
            logging.info("Checking /tmp/%s-file on %s" % (role, n))
            slave = ci.environment.node[n]
            node = self._get_slave_node_by_devops_node(slave)
            ctrl_ssh = SSHClient()
            # Try each interface that reports an IP until one accepts
            # an SSH connection; stop at the first success.
            for i in node['meta']['interfaces']:
                ip = i.get('ip', None)
                if ip:
                    logging.debug("Trying to connect to %s via ssh", ip)
                    try:
                        ctrl_ssh.connect_ssh(ip, 'root',
                                             key_filename=keyfiles)
                    except Exception, e:
                        logging.debug("Unable to connect to %s: %s",
                                      ip, str(e))
                        continue
                    ret = ctrl_ssh.execute('test -f /tmp/%s-file' %
                                           role)
                    self.assertEquals(
                        ret['exit_status'], 0,
                        ("File '/tmp/%s-file' not found" % role))
                    ctrl_ssh.disconnect()
                    break
                else:
                    i_name = i.get('name') or i.get('mac') or str(i)
                    logging.debug("Interface doesn't have an IP: %r",
                                  i_name)
            # NOTE(review): this only proves the LAST interface had an
            # IP, not that an SSH connection succeeded - confirm intent.
            self.assertNotEqual(
                ip, None,
                "Unable to fing a valid IP"
                " for node %s" % n)
def __init__(self, *args, **kwargs):
    """Initialize the test case with a fresh, unconnected SSH client."""
    ssh = SSHClient()
    super(TestPuppetMaster, self).__init__(*args, **kwargs)
    self.remote = ssh
def __init__(self, *args, **kwargs):
    """Initialize the test case with a fresh, unconnected SSH client."""
    ssh = SSHClient()
    super(TestCobbler, self).__init__(*args, **kwargs)
    self.remote = ssh
def __init__(self, *args, **kwargs):
    """Initialize the test case with a fresh, unconnected SSH client."""
    ssh = SSHClient()
    super(TestNailyd, self).__init__(*args, **kwargs)
    self.remote = ssh