Example #1
 def __init__(self, *args, **kwargs):
     super(TestNode, self).__init__(*args, **kwargs)
     self.remote = SSHClient()
     self.client = HTTPClient(url="http://%s:8000" %
                              self.get_admin_node_ip())
     self.ssh_user = "******"
     self.ssh_passwd = "r00tme"
     self.admin_host = self.get_admin_node_ip()
     self.remote.connect_ssh(self.admin_host, self.ssh_user,
                             self.ssh_passwd)
Example #2
 def __init__(self, *args, **kwargs):
     super(TestNode, self).__init__(*args, **kwargs)
     self.remote = SSHClient()
     self.client = HTTPClient(
         url="http://%s:8000" % self.get_admin_node_ip()
     )
     self.ssh_user = "******"
     self.ssh_passwd = "r00tme"
     self.admin_host = self.get_admin_node_ip()
     self.remote.connect_ssh(
         self.admin_host,
         self.ssh_user,
         self.ssh_passwd
     )
Example #3
 def __init__(self, ip):
     self.client = HTTPClient(url="http://%s:8000" % ip)
     super(NailgunClient, self).__init__()
Example #4
class NailgunClient(object):
    def __init__(self, ip):
        self.client = HTTPClient(url="http://%s:8000" % ip)
        super(NailgunClient, self).__init__()

    @logwrap
    def get_root(self):
        return self.client.get("/")

    @logwrap
    @json_parse
    def list_nodes(self):
        return self.client.get("/api/nodes/")

    @logwrap
    @json_parse
    def list_cluster_nodes(self, cluster_id):
        return self.client.get("/api/nodes/?cluster_id=%s" % cluster_id)

    @logwrap
    @json_parse
    def get_networks(self, cluster_id):
        return self.client.get(
            "/api/clusters/%d/network_configuration/" % cluster_id)

    @logwrap
    @json_parse
    def verify_networks(self, cluster_id, networks):
        return self.client.put(
            "/api/clusters/%d/network_configuration/verify/" % cluster_id,
            {'networks': networks}
        )

    @logwrap
    @json_parse
    def get_cluster_attributes(self, cluster_id):
        return self.client.get(
            "/api/clusters/%s/attributes/" % cluster_id
        )

    @logwrap
    @json_parse
    def update_cluster_attributes(self, cluster_id, attrs):
        return self.client.put(
            "/api/clusters/%s/attributes/" % cluster_id, attrs
        )

    @logwrap
    @json_parse
    def get_cluster(self, cluster_id):
        return self.client.get(
            "/api/clusters/%s" % cluster_id)

    @logwrap
    @json_parse
    def update_cluster(self, cluster_id, data):
        return self.client.put(
            "/api/clusters/%s/" % cluster_id,
            data
        )

    @logwrap
    @json_parse
    def update_node(self, node_id, data):
        return self.client.put(
            "/api/nodes/%s/" % node_id, data
        )

    @logwrap
    @json_parse
    def update_cluster_changes(self, cluster_id):
        return self.client.put(
            "/api/clusters/%d/changes/" % cluster_id
        )

    @logwrap
    @json_parse
    def get_task(self, task_id):
        return self.client.get("/api/tasks/%s" % task_id)

    @logwrap
    @json_parse
    def get_releases(self):
        return self.client.get("/api/releases/")

    @logwrap
    def get_grizzly_release_id(self):
        for release in self.get_releases():
            if release["name"].find("Grizzly") != -1:
                return release["id"]

    @logwrap
    @json_parse
    def list_clusters(self):
        return self.client.get("/api/clusters/")

    @logwrap
    @json_parse
    def create_cluster(self, data):
        return self.client.post(
            "/api/clusters",
            data=data
        )

    @logwrap
    @json_parse
    def update_network(self, cluster_id, flat_net=None, net_manager=None):
        data = {}
        if flat_net is not None:
            data.update({'networks': flat_net})
        if net_manager is not None:
            data.update({'net_manager': net_manager})
        return self.client.put(
            "/api/clusters/%d/network_configuration" % cluster_id, data
        )

    @logwrap
    def get_cluster_id(self, name):
        for cluster in self.list_clusters():
            if cluster["name"] == name:
                return cluster["id"]

    @logwrap
    def add_syslog_server(self, cluster_id, host, port):
        # Update the cluster's editable attributes; in particular,
        # set an extra syslog server.
        attributes = self.get_cluster_attributes(cluster_id)
        attributes["editable"]["syslog"]["syslog_server"]["value"] = host
        attributes["editable"]["syslog"]["syslog_port"]["value"] = port
        self.update_cluster_attributes(cluster_id, attributes)

    @logwrap
    def clean_clusters(self):
        for cluster in self.list_clusters():
            self.update_cluster(
                cluster["id"], {"nodes": []}
            )

    @logwrap
    def _get_cluster_vlans(self, cluster_id):
        cluster_vlans = []
        for network in self.get_networks(cluster_id)['networks']:
            amount = network.get('amount', 1)
            cluster_vlans.extend(range(network['vlan_start'],
                                       network['vlan_start'] + amount))
        return cluster_vlans
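
A minimal usage sketch for the client in Example #4 (not part of the original source): it only calls methods defined above, and the IP address, cluster name, payload keys and syslog endpoint are illustrative assumptions.

# Hypothetical usage of the NailgunClient from Example #4; the IP,
# cluster name, payload keys and syslog address are placeholders.
client = NailgunClient("10.20.0.2")

# Pick the Grizzly release and create a cluster against it
# (the exact payload keys are an assumption).
release_id = client.get_grizzly_release_id()
client.create_cluster({"name": "demo", "release": release_id})
cluster_id = client.get_cluster_id("demo")

# Point the cluster at an extra syslog server and list its VLANs
# using the helper shown above.
client.add_syslog_server(cluster_id, "10.20.0.1", 514)
vlans = client._get_cluster_vlans(cluster_id)
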
Example #5
class TestNode(Base):
    def __init__(self, *args, **kwargs):
        super(TestNode, self).__init__(*args, **kwargs)
        self.remote = SSHClient()
        self.client = HTTPClient(url="http://%s:8000" %
                                 self.get_admin_node_ip())
        self.ssh_user = "******"
        self.ssh_passwd = "r00tme"
        self.admin_host = self.get_admin_node_ip()
        self.remote.connect_ssh(self.admin_host, self.ssh_user,
                                self.ssh_passwd)

    def setUp(self):
        pass

    def tearDown(self):
        self._wait_for_threads()
        try:
            self._stop_logserver()
        except AttributeError:
            pass

    def _start_logserver(self, handler=None):
        self._logserver_status = False
        if not handler:
            """
            We define log message handler in such a way
            assuming that if at least one message is received
            logging works fine.
            """
            def handler(message):
                self._logserver_status = True

        self.logserver = LogServer(address=self.get_host_node_ip(), port=5514)
        self.logserver.set_handler(handler)
        self.logserver.start()

    def _stop_logserver(self):
        self.logserver.stop()
        self._logserver_status = False

    def _status_logserver(self):
        return self._logserver_status

    def test_release_upload(self):
        self._upload_sample_release()

    def test_http_returns_200(self):
        resp = self.client.get("/")
        self.assertEquals(200, resp.getcode())

    def test_create_empty_cluster(self):
        self._create_cluster(name='empty')

    @snapshot_errors
    def test_node_deploy(self):
        self._revert_nodes()
        self._bootstrap_nodes(['slave1'])

    @snapshot_errors
    def test_updating_nodes_in_cluster(self):
        self._revert_nodes()
        cluster_id = self._create_cluster(name='empty')
        nodes = self._bootstrap_nodes(['slave1'])
        self._update_nodes_in_cluster(cluster_id, nodes)

    @snapshot_errors
    def test_one_node_provisioning(self):
        self._revert_nodes()
        self._clean_clusters()
        self._basic_provisioning('provision', {'controller': ['slave1']})

    @snapshot_errors
    def test_simple_cluster_flat(self):
        logging.info("Testing simple flat installation.")
        self._revert_nodes()
        cluster_name = 'simple_flat'
        nodes = {'controller': ['slave1'], 'compute': ['slave2']}
        cluster_id = self._basic_provisioning(cluster_name, nodes)
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(lambda: self._check_cluster_status(node['ip'], 5), timeout=300)

        logging.info("Verifying networks for simple flat installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        for vlan in vlans:
            for n in (slave, slave2):
                self._restore_vlan_in_ebtables(n.interfaces[0].target_dev,
                                               vlan, False)
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network simple flat', 60 * 2)

    @snapshot_errors
    def test_simple_cluster_vlan(self):
        logging.info("Testing simple vlan installation.")
        self._revert_nodes()
        cluster_name = 'simple_vlan'
        nodes = {'controller': ['slave1'], 'compute': ['slave2']}
        self._create_cluster(name=cluster_name, net_manager="VlanManager")
        cluster_id = self._basic_provisioning(cluster_name, nodes)
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(lambda: self._check_cluster_status(node['ip'], 5, 8), timeout=300)

        logging.info("Verifying networks for simple vlan installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        for vlan in vlans:
            for n in (slave, slave2):
                self._restore_vlan_in_ebtables(n.interfaces[0].target_dev,
                                               vlan, False)
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network simple vlan', 60 * 2)

    @snapshot_errors
    def test_ha_cluster_flat(self):
        logging.info("Testing ha flat installation.")
        self._revert_nodes()
        cluster_name = 'ha_flat'
        nodes = {
            'controller': ['slave1', 'slave2', 'slave3'],
            'compute': ['slave4', 'slave5']
        }
        cluster_id = self._basic_provisioning(cluster_name, nodes, 90 * 60)
        logging.info("Checking cluster status on slave1")
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(lambda: self._check_cluster_status(node['ip'], 13), timeout=300)

        logging.info("Verifying networks for ha flat installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        slave3 = ci.environment.node['slave3']
        slave4 = ci.environment.node['slave4']
        slave5 = ci.environment.node['slave5']
        for vlan in vlans:
            for n in (slave, slave2, slave3, slave4, slave5):
                self._restore_vlan_in_ebtables(n.interfaces[0].target_dev,
                                               vlan, False)
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network ha flat', 60 * 2)

    @snapshot_errors
    def test_ha_cluster_vlan(self):
        logging.info("Testing ha vlan installation.")
        self._revert_nodes()
        cluster_name = 'ha_vlan'
        nodes = {
            'controller': ['slave1', 'slave2', 'slave3'],
            'compute': ['slave4', 'slave5']
        }
        self._create_cluster(name=cluster_name, net_manager="VlanManager")
        cluster_id = self._basic_provisioning(cluster_name, nodes, 90 * 60)
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(lambda: self._check_cluster_status(node['ip'], 13, 8),
             timeout=300)

        logging.info("Verifying networks for ha vlan installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        slave3 = ci.environment.node['slave3']
        slave4 = ci.environment.node['slave4']
        slave5 = ci.environment.node['slave5']
        for vlan in vlans:
            for n in (slave, slave2, slave3, slave4, slave5):
                self._restore_vlan_in_ebtables(n.interfaces[0].target_dev,
                                               vlan, False)
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network ha vlan', 60 * 2)

    @snapshot_errors
    def test_network_config(self):
        self._revert_nodes()
        self._clean_clusters()
        self._basic_provisioning('network_config', {'controller': ['slave1']})

        slave = ci.environment.node['slave1']
        keyfiles = ci.environment.node['admin'].metadata['keyfiles']
        node = self._get_slave_node_by_devops_node(slave)
        ctrl_ssh = SSHClient()
        ctrl_ssh.connect_ssh(node['ip'], 'root', key_filename=keyfiles)
        ifaces_fail = False
        for iface in node['network_data']:
            try:
                ifname = "%s.%s@%s" % (iface['dev'], iface['vlan'],
                                       iface['dev'])
                ifname_short = "%s.%s" % (iface['dev'], iface['vlan'])
            except KeyError:
                # Untagged interface: no 'vlan' key, so the short name
                # is just the device name.
                ifname = iface['dev']
                ifname_short = ifname
            iface_data = ''.join(
                ctrl_ssh.execute('/sbin/ip addr show dev %s' %
                                 ifname_short)['stdout'])
            if iface_data.find(ifname) == -1:
                logging.error("Interface %s is absent" % ifname_short)
                ifaces_fail = True
            else:
                try:
                    if iface_data.find("inet %s" % iface['ip']) == -1:
                        logging.error("Interface %s does not have ip %s" %
                                      (ifname_short, iface['ip']))
                        ifaces_fail = True
                except KeyError:
                    if iface_data.find("inet ") != -1:
                        logging.error(
                            "Interface %s does have ip.  And it should not" %
                            ifname_short)
                        ifaces_fail = True
                try:
                    if iface_data.find("brd %s" % iface['brd']) == -1:
                        logging.error(
                            "Interface %s does not have broadcast %s" %
                            (ifname_short, iface['brd']))
                        ifaces_fail = True
                except KeyError:
                    pass
        self.assertEquals(ifaces_fail, False)

    @snapshot_errors
    def test_node_deletion(self):
        self._revert_nodes()
        cluster_name = 'node_deletion'
        node_name = 'slave1'
        nodes = {'controller': [node_name]}
        cluster_id = self._basic_provisioning(cluster_name, nodes)

        slave = ci.environment.node[node_name]
        node = self._get_slave_node_by_devops_node(slave)
        self.client.put("/api/nodes/%s/" % node['id'],
                        {'pending_deletion': True})
        task = self._launch_provisioning(cluster_id)
        self._task_wait(task, 'Node deletion', 60)

        timer = time.time()
        timeout = 5 * 60
        while True:
            response = self.client.get("/api/nodes/")
            nodes = json.loads(response.read())
            for n in nodes:
                if (n['mac'] == node['mac'] and n['status'] == 'discover'):
                    return
            if (time.time() - timer) > timeout:
                raise Exception("Bootstrap boot timeout expired!")
            time.sleep(5)

    @snapshot_errors
    def test_network_verify_with_blocked_vlan(self):
        self._revert_nodes()
        cluster_name = 'net_verify'
        cluster_id = self._create_cluster(name=cluster_name)
        node_names = ['slave1', 'slave2']
        nailgun_slave_nodes = self._bootstrap_nodes(node_names)
        devops_nodes = [ci.environment.node[n] for n in node_names]
        logging.info("Clear BROUTING table entries.")
        vlans = self._get_cluster_vlans(cluster_id)
        for vlan in vlans:
            for node in devops_nodes:
                for interface in node.interfaces:
                    self._restore_vlan_in_ebtables(interface.target_dev, vlan,
                                                   False)
        self._update_nodes_in_cluster(cluster_id, nailgun_slave_nodes)
        for node in devops_nodes:
            for interface in node.interfaces:
                self._block_vlan_in_ebtables(interface.target_dev, vlans[0])
        task = self._run_network_verify(cluster_id)
        task = self._task_wait(task,
                               'Verify network in cluster with blocked vlan',
                               60 * 2, True)
        self.assertEquals(task['status'], 'error')

    @snapshot_errors
    def test_multinic_bootstrap_booting(self):
        self._revert_nodes()
        slave = filter(lambda n: n.name != 'admin' and len(n.interfaces) > 1,
                       ci.environment.nodes)[0]
        nodename = slave.name
        logging.info("Using node %r with %d interfaces", nodename,
                     len(slave.interfaces))
        slave.stop()
        admin = ci.environment.node['admin']
        macs = [i.mac_address for i in slave.interfaces]
        logging.info("Block all MACs: %s.", ', '.join([m for m in macs]))
        for mac in macs:
            self._block_mac_in_ebtables(mac)
            self.addCleanup(self._restore_mac_in_ebtables, mac)
        for mac in macs:
            logging.info("Trying to boot node %r via interface with MAC %s...",
                         nodename, mac)
            self._restore_mac_in_ebtables(mac)
            slave.start()
            nailgun_slave = self._bootstrap_nodes([nodename])[0]
            self.assertEqual(mac.upper(), nailgun_slave['mac'].upper())
            slave.stop()
            admin.restore_snapshot('initial')
            self._block_mac_in_ebtables(mac)

    @staticmethod
    def _block_mac_in_ebtables(mac):
        try:
            subprocess.check_output(
                'sudo ebtables -t filter -A FORWARD -s %s -j DROP' % mac,
                stderr=subprocess.STDOUT,
                shell=True)
            logging.debug("MAC %s blocked via ebtables.", mac)
        except subprocess.CalledProcessError as e:
            raise Exception("Can't block mac %s via ebtables: %s", mac,
                            e.output)

    @staticmethod
    def _restore_mac_in_ebtables(mac):
        try:
            subprocess.check_output(
                'sudo ebtables -t filter -D FORWARD -s %s -j DROP' % mac,
                stderr=subprocess.STDOUT,
                shell=True)
            logging.debug("MAC %s unblocked via ebtables.", mac)
        except subprocess.CalledProcessError as e:
            logging.warn("Can't restore mac %s via ebtables: %s", mac,
                         e.output)

    def _block_vlan_in_ebtables(self, target_dev, vlan):
        try:
            subprocess.check_output(
                'sudo ebtables -t broute -A BROUTING -i %s -p 8021Q'
                ' --vlan-id %s -j DROP' % (target_dev, vlan),
                stderr=subprocess.STDOUT,
                shell=True)
            self.addCleanup(self._restore_vlan_in_ebtables, target_dev, vlan)
            logging.debug("Vlan %s on interface %s blocked via ebtables.",
                          vlan, target_dev)
        except subprocess.CalledProcessError as e:
            raise Exception("Can't block vlan %s for interface %s"
                            " via ebtables: %s" % (vlan, target_dev, e.output))

    def _get_cluster_vlans(self, cluster_id):
        resp = self.client.get('/api/clusters/%d/network_configuration/' %
                               cluster_id)
        self.assertEquals(200, resp.getcode())
        cluster_vlans = []
        networks = json.loads(resp.read())['networks']
        for n in networks:
            amount = n.get('amount', 1)
            cluster_vlans.extend(
                range(n['vlan_start'], n['vlan_start'] + amount))
        self.assertNotEqual(cluster_vlans, [])
        return cluster_vlans

    @staticmethod
    def _restore_vlan_in_ebtables(target_dev, vlan, log=True):
        try:
            subprocess.check_output(
                'sudo ebtables -t broute -D BROUTING -i %s -p 8021Q'
                ' --vlan-id %s -j DROP' % (target_dev, vlan),
                stderr=subprocess.STDOUT,
                shell=True)
            logging.debug("Vlan %s on interface %s unblocked via ebtables.",
                          vlan, target_dev)
        except subprocess.CalledProcessError as e:
            if log:
                logging.warn("Can't restore vlan %s for interface %s"
                             " via ebtables: %s" %
                             (vlan, target_dev, e.output))

    def _run_network_verify(self, cluster_id):
        logging.info("Run network verify in cluster %d", cluster_id)
        resp = self.client.get('/api/clusters/%d/network_configuration/' %
                               cluster_id)
        self.assertEquals(200, resp.getcode())
        network_configuration = json.loads(resp.read())
        changes = self.client.put(
            '/api/clusters/%d/network_configuration/verify/' % cluster_id,
            network_configuration)
        self.assertEquals(200, changes.getcode())
        return json.loads(changes.read())

    def _basic_provisioning(self,
                            cluster_name,
                            nodes_dict,
                            task_timeout=30 * 60):
        self._start_logserver()
        self._clean_clusters()
        cluster_id = self._create_cluster(name=cluster_name)

        # Update the cluster's editable attributes; in particular,
        # set an extra syslog server.
        response = self.client.get("/api/clusters/%s/attributes/" % cluster_id)
        attrs = json.loads(response.read())
        attrs["editable"]["syslog"]["syslog_server"]["value"] = \
            self.get_host_node_ip()
        attrs["editable"]["syslog"]["syslog_port"]["value"] = \
            self.logserver.bound_port()
        self.client.put("/api/clusters/%s/attributes/" % cluster_id, attrs)

        node_names = []
        for role in nodes_dict:
            node_names += nodes_dict[role]
        try:
            if len(node_names) > 1:
                if len(nodes_dict['controller']) == 1:
                    self.client.put("/api/clusters/%s/" % cluster_id,
                                    {"mode": "multinode"})
                if len(nodes_dict['controller']) > 1:
                    self.client.put("/api/clusters/%s/" % cluster_id,
                                    {"mode": "ha"})
        except KeyError:
            pass

        nodes = self._bootstrap_nodes(node_names)

        for role in nodes_dict:
            for n in nodes_dict[role]:
                slave = ci.environment.node[n]
                node = self._get_slave_node_by_devops_node(slave)
                self.client.put("/api/nodes/%s/" % node['id'], {
                    "role": role,
                    "pending_addition": True
                })

        self._update_nodes_in_cluster(cluster_id, nodes)
        task = self._launch_provisioning(cluster_id)

        self._task_wait(task, 'Installation', task_timeout)

        logging.info("Checking role files on slave nodes")
        keyfiles = ci.environment.node['admin'].metadata['keyfiles']
        for role in nodes_dict:
            for n in nodes_dict[role]:
                logging.info("Checking /tmp/%s-file on %s" % (role, n))
                slave = ci.environment.node[n]
                node = self._get_slave_node_by_devops_node(slave)
                ctrl_ssh = SSHClient()
                for i in node['meta']['interfaces']:
                    ip = i.get('ip', None)
                    if ip:
                        logging.debug("Trying to connect to %s via ssh", ip)
                        try:
                            ctrl_ssh.connect_ssh(ip,
                                                 'root',
                                                 key_filename=keyfiles)
                        except Exception as e:
                            logging.debug("Unable to connect to %s: %s", ip,
                                          str(e))
                            continue
                        ret = ctrl_ssh.execute('test -f /tmp/%s-file' % role)
                        self.assertEquals(
                            ret['exit_status'], 0,
                            ("File '/tmp/%s-file' not found" % role))
                        ctrl_ssh.disconnect()
                        break
                    else:
                        i_name = i.get('name') or i.get('mac') or str(i)
                        logging.debug("Interface doesn't have an IP: %r",
                                      i_name)
                self.assertNotEqual(
                    ip, None, "Unable to fing a valid IP"
                    " for node %s" % n)
        return cluster_id
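
The examples never show the HTTPClient helper they construct with url="http://%s:8000". Below is a minimal stand-in consistent with how it is used above (get/put/post/delete returning a response that supports getcode() and read()); this is an assumption for illustration, not the real fuel-devops client.

# Minimal HTTPClient stand-in, consistent with how the examples use it
# (an assumption, not the real helper): every verb returns a raw
# response object exposing getcode() and read().
import json
import urllib2


class HTTPClient(object):
    def __init__(self, url):
        self.url = url

    def _request(self, method, endpoint, data=None):
        payload = None if data is None else json.dumps(data)
        request = urllib2.Request(
            self.url + endpoint, data=payload,
            headers={'Content-Type': 'application/json'})
        request.get_method = lambda: method
        return urllib2.urlopen(request)

    def get(self, endpoint):
        return self._request('GET', endpoint)

    def post(self, endpoint, data=None):
        return self._request('POST', endpoint, data)

    def put(self, endpoint, data=None):
        return self._request('PUT', endpoint, data)

    def delete(self, endpoint):
        return self._request('DELETE', endpoint)
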
Example #6
 def __init__(self, ip):
     self.client = HTTPClient(url="http://%s:8000" % ip)
     super(NailgunClient, self).__init__()
Example #7
class NailgunClient(object):
    def __init__(self, ip):
        self.client = HTTPClient(url="http://%s:8000" % ip)
        super(NailgunClient, self).__init__()

    @logwrap
    def get_root(self):
        return self.client.get("/")

    @logwrap
    @json_parse
    def list_nodes(self):
        return self.client.get("/api/nodes/")

    @logwrap
    @json_parse
    def list_cluster_nodes(self, cluster_id):
        return self.client.get("/api/nodes/?cluster_id=%s" % cluster_id)

    @logwrap
    @json_parse
    def get_networks(self, cluster_id):
        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self.client.get("/api/clusters/%d/network_configuration/%s" %
                               (cluster_id, net_provider))

    @logwrap
    @json_parse
    def verify_networks(self, cluster_id, networks):
        net_provider = self.get_cluster(cluster_id)['net_provider']
        return self.client.put(
            "/api/clusters/%d/network_configuration/%s/verify/" %
            (cluster_id, net_provider), {'networks': networks})

    @logwrap
    @json_parse
    def get_cluster_attributes(self, cluster_id):
        return self.client.get("/api/clusters/%s/attributes/" % cluster_id)

    @logwrap
    @json_parse
    def update_cluster_attributes(self, cluster_id, attrs):
        return self.client.put("/api/clusters/%s/attributes/" % cluster_id,
                               attrs)

    @logwrap
    @json_parse
    def get_cluster(self, cluster_id):
        return self.client.get("/api/clusters/%s" % cluster_id)

    @logwrap
    @json_parse
    def update_cluster(self, cluster_id, data):
        return self.client.put("/api/clusters/%s/" % cluster_id, data)

    @logwrap
    @json_parse
    def delete_cluster(self, cluster_id):
        return self.client.delete("/api/clusters/%s/" % cluster_id)

    @logwrap
    @json_parse
    def update_node(self, node_id, data):
        return self.client.put("/api/nodes/%s/" % node_id, data)

    @logwrap
    @json_parse
    def update_nodes(self, data):
        return self.client.put("/api/nodes", data)

    @logwrap
    @json_parse
    def deploy_cluster_changes(self, cluster_id):
        return self.client.put("/api/clusters/%d/changes/" % cluster_id)

    @logwrap
    @json_parse
    def get_task(self, task_id):
        return self.client.get("/api/tasks/%s" % task_id)

    @logwrap
    @json_parse
    def get_tasks(self):
        return self.client.get("/api/tasks")

    @logwrap
    @json_parse
    def get_releases(self):
        return self.client.get("/api/releases/")

    @logwrap
    @json_parse
    def get_node_disks(self, disk_id):
        return self.client.get("/api/nodes/%s/disks" % disk_id)

    @logwrap
    def get_release_id(self, release_name=OPENSTACK_RELEASE):
        for release in self.get_releases():
            if release["name"].find(release_name) != -1:
                return release["id"]

    @logwrap
    @json_parse
    def get_node_interfaces(self, node_id):
        return self.client.get("/api/nodes/%s/interfaces" % node_id)

    @logwrap
    @json_parse
    def put_node_interfaces(self, data):
        return self.client.put("/api/nodes/interfaces", data)

    @logwrap
    @json_parse
    def list_clusters(self):
        return self.client.get("/api/clusters/")

    @logwrap
    @json_parse
    def create_cluster(self, data):
        return self.client.post("/api/clusters", data=data)

    @logwrap
    @json_parse
    def get_ostf_test_sets(self, cluster_id):
        return self.client.get("/ostf/testsets/%s" % cluster_id)

    @logwrap
    @json_parse
    def get_ostf_tests(self, cluster_id):
        return self.client.get("/ostf/tests/%s" % cluster_id)

    @logwrap
    @json_parse
    def get_ostf_test_run(self, cluster_id):
        return self.client.get("/ostf/testruns/last/%s" % cluster_id)

    @logwrap
    @json_parse
    def ostf_run_tests(self, cluster_id, test_sets_list):
        data = []
        for test_set in test_sets_list:
            data.append({
                'metadata': {
                    'cluster_id': str(cluster_id),
                    'config': {}
                },
                'testset': test_set
            })
        # Fetch the tests first; otherwise the POST below returns a 500 error.
        self.get_ostf_tests(cluster_id)
        return self.client.post("/ostf/testruns", data)

    @logwrap
    @json_parse
    def update_network(self, cluster_id, networks=None, net_manager=None):
        data = {}
        net_provider = self.get_cluster(cluster_id)['net_provider']
        if networks is not None:
            data.update({'networks': networks})
        if net_manager is not None:
            data.update({'net_manager': net_manager})
        return self.client.put(
            "/api/clusters/%d/network_configuration/%s" %
            (cluster_id, net_provider), data)

    @logwrap
    def get_cluster_id(self, name):
        for cluster in self.list_clusters():
            if cluster["name"] == name:
                return cluster["id"]

    @logwrap
    def add_syslog_server(self, cluster_id, host, port):
        # Update the cluster's editable attributes; in particular,
        # set an extra syslog server.
        attributes = self.get_cluster_attributes(cluster_id)
        attributes["editable"]["syslog"]["syslog_server"]["value"] = host
        attributes["editable"]["syslog"]["syslog_port"]["value"] = port
        self.update_cluster_attributes(cluster_id, attributes)

    @logwrap
    def clean_clusters(self):
        for cluster in self.list_clusters():
            self.delete_cluster(cluster["id"])

    @logwrap
    def _get_cluster_vlans(self, cluster_id):
        cluster_vlans = []
        for network in self.get_networks(cluster_id)['networks']:
            if network['vlan_start'] is not None:
                amount = network.get('amount', 1)
                cluster_vlans.extend(
                    range(network['vlan_start'],
                          network['vlan_start'] + amount))
        return cluster_vlans

    @logwrap
    @json_parse
    def get_notifications(self):
        return self.client.get("/api/notifications")

    @logwrap
    @json_parse
    def update_redhat_setup(self, data):
        return self.client.post("/api/redhat/setup", data=data)

    @logwrap
    @json_parse
    def generate_logs(self):
        return self.client.put("/api/logs/package")
Example #8
class NailgunClient(object):
    def __init__(self, ip):
        self.client = HTTPClient(url="http://%s:8000" % ip)
        super(NailgunClient, self).__init__()

    @logwrap
    def get_root(self):
        return self.client.get("/")

    @logwrap
    @json_parse
    def list_nodes(self):
        return self.client.get("/api/nodes/")

    @logwrap
    @json_parse
    def list_cluster_nodes(self, cluster_id):
        return self.client.get("/api/nodes/?cluster_id=%s" % cluster_id)

    @logwrap
    @json_parse
    def get_networks(self, cluster_id):
        return self.client.get(
            "/api/clusters/%d/network_configuration/" % cluster_id)

    @logwrap
    @json_parse
    def verify_networks(self, cluster_id, networks):
        return self.client.put(
            "/api/clusters/%d/network_configuration/verify/" % cluster_id,
            {'networks': networks}
        )

    @logwrap
    @json_parse
    def get_cluster_attributes(self, cluster_id):
        return self.client.get(
            "/api/clusters/%s/attributes/" % cluster_id
        )

    @logwrap
    @json_parse
    def update_cluster_attributes(self, cluster_id, attrs):
        return self.client.put(
            "/api/clusters/%s/attributes/" % cluster_id, attrs
        )

    @logwrap
    @json_parse
    def get_cluster(self, cluster_id):
        return self.client.get(
            "/api/clusters/%s" % cluster_id)

    @logwrap
    @json_parse
    def update_cluster(self, cluster_id, data):
        return self.client.put(
            "/api/clusters/%s/" % cluster_id,
            data
        )

    @logwrap
    @json_parse
    def delete_cluster(self, cluster_id):
        return self.client.delete(
            "/api/clusters/%s/" % cluster_id
        )

    @logwrap
    @json_parse
    def update_node(self, node_id, data):
        return self.client.put(
            "/api/nodes/%s/" % node_id, data
        )

    @logwrap
    @json_parse
    def update_nodes(self, data):
        return self.client.put(
            "/api/nodes", data
        )

    @logwrap
    @json_parse
    def deploy_cluster_changes(self, cluster_id):
        return self.client.put(
            "/api/clusters/%d/changes/" % cluster_id
        )

    @logwrap
    @json_parse
    def get_task(self, task_id):
        return self.client.get("/api/tasks/%s" % task_id)

    @logwrap
    @json_parse
    def get_releases(self):
        return self.client.get("/api/releases/")

    @logwrap
    def get_grizzly_release_id(self):
        for release in self.get_releases():
            if release["name"].find("Grizzly") != -1:
                return release["id"]

    @logwrap
    @json_parse
    def list_clusters(self):
        return self.client.get("/api/clusters/")

    @logwrap
    @json_parse
    def create_cluster(self, data):
        return self.client.post(
            "/api/clusters",
            data=data
        )

    @logwrap
    @json_parse
    def update_network(self, cluster_id, networks=None, net_manager=None):
        data = {}
        if networks is not None:
            data.update({'networks': networks})
        if net_manager is not None:
            data.update({'net_manager': net_manager})
        return self.client.put(
            "/api/clusters/%d/network_configuration" % cluster_id, data
        )

    @logwrap
    def get_cluster_id(self, name):
        for cluster in self.list_clusters():
            if cluster["name"] == name:
                return cluster["id"]

    @logwrap
    def add_syslog_server(self, cluster_id, host, port):
        # Update the cluster's editable attributes; in particular,
        # set an extra syslog server.
        attributes = self.get_cluster_attributes(cluster_id)
        attributes["editable"]["syslog"]["syslog_server"]["value"] = host
        attributes["editable"]["syslog"]["syslog_port"]["value"] = port
        self.update_cluster_attributes(cluster_id, attributes)

    @logwrap
    def clean_clusters(self):
        for cluster in self.list_clusters():
            self.delete_cluster(cluster["id"])

    @logwrap
    def _get_cluster_vlans(self, cluster_id):
        cluster_vlans = []
        for network in self.get_networks(cluster_id)['networks']:
            amount = network.get('amount', 1)
            cluster_vlans.extend(range(network['vlan_start'],
                                       network['vlan_start'] + amount))
        return cluster_vlans
Example #9
class NailgunClient(object):
    def __init__(self, ip):
        self.client = HTTPClient(url="http://%s:8000" % ip)
        super(NailgunClient, self).__init__()

    @logwrap
    def get_root(self):
        return self.client.get("/")

    @logwrap
    @json_parse
    def list_nodes(self):
        return self.client.get("/api/nodes/")

    @logwrap
    @json_parse
    def list_cluster_nodes(self, cluster_id):
        return self.client.get("/api/nodes/?cluster_id=%s" % cluster_id)

    @logwrap
    @json_parse
    def get_networks(self, cluster_id):
        return self.client.get(
            "/api/clusters/%d/network_configuration/" % cluster_id)

    @logwrap
    @json_parse
    def verify_networks(self, cluster_id, networks):
        return self.client.put(
            "/api/clusters/%d/network_configuration/verify/" % cluster_id,
            {'networks': networks}
        )

    @logwrap
    @json_parse
    def get_cluster_attributes(self, cluster_id):
        return self.client.get(
            "/api/clusters/%s/attributes/" % cluster_id
        )

    @logwrap
    @json_parse
    def update_cluster_attributes(self, cluster_id, attrs):
        return self.client.put(
            "/api/clusters/%s/attributes/" % cluster_id, attrs
        )

    @logwrap
    @json_parse
    def get_cluster(self, cluster_id):
        return self.client.get(
            "/api/clusters/%s" % cluster_id)

    @logwrap
    @json_parse
    def update_cluster(self, cluster_id, data):
        return self.client.put(
            "/api/clusters/%s/" % cluster_id,
            data
        )

    @logwrap
    @json_parse
    def delete_cluster(self, cluster_id):
        return self.client.delete(
            "/api/clusters/%s/" % cluster_id
        )

    @logwrap
    @json_parse
    def update_node(self, node_id, data):
        return self.client.put(
            "/api/nodes/%s/" % node_id, data
        )

    @logwrap
    @json_parse
    def update_nodes(self, data):
        return self.client.put(
            "/api/nodes", data
        )

    @logwrap
    @json_parse
    def deploy_cluster_changes(self, cluster_id):
        return self.client.put(
            "/api/clusters/%d/changes/" % cluster_id
        )

    @logwrap
    @json_parse
    def get_task(self, task_id):
        return self.client.get("/api/tasks/%s" % task_id)

    @logwrap
    @json_parse
    def get_tasks(self):
        return self.client.get("/api/tasks")

    @logwrap
    @json_parse
    def get_releases(self):
        return self.client.get("/api/releases/")

    @logwrap
    @json_parse
    def get_node_disks(self, disk_id):
        return self.client.get("/api/nodes/%s/disks" % disk_id)

    @logwrap
    def get_release_id(self, release_name=OPENSTACK_RELEASE):
        for release in self.get_releases():
            if release["name"].find(release_name) != -1:
                return release["id"]

    @logwrap
    @json_parse
    def get_node_interfaces(self, node_id):
        return self.client.get("/api/nodes/%s/interfaces" % node_id)

    @logwrap
    @json_parse
    def put_node_interfaces(self, data):
        return self.client.put("/api/nodes/interfaces", data)

    @logwrap
    @json_parse
    def list_clusters(self):
        return self.client.get("/api/clusters/")

    @logwrap
    @json_parse
    def create_cluster(self, data):
        return self.client.post(
            "/api/clusters",
            data=data
        )

    @logwrap
    @json_parse
    def get_ostf_test_sets(self):
        return self.client.get("/ostf/testsets")

    @logwrap
    @json_parse
    def get_ostf_tests(self):
        return self.client.get("/ostf/tests")

    @logwrap
    @json_parse
    def get_ostf_test_run(self, cluster_id):
        return self.client.get("/ostf/testruns/last/%s" % cluster_id)

    @logwrap
    @json_parse
    def ostf_run_tests(self, cluster_id, test_sets_list):
        data = []
        for test_set in test_sets_list:
            data.append(
                {
                    'metadata': {'cluster_id': cluster_id, 'config': {}},
                    'testset': test_set
                }
            )
        return self.client.post("/ostf/testruns", data)

    @logwrap
    @json_parse
    def update_network(self, cluster_id, networks=None, net_manager=None):
        data = {}
        if networks is not None:
            data.update({'networks': networks})
        if net_manager is not None:
            data.update({'net_manager': net_manager})
        return self.client.put(
            "/api/clusters/%d/network_configuration" % cluster_id, data
        )

    @logwrap
    def get_cluster_id(self, name):
        for cluster in self.list_clusters():
            if cluster["name"] == name:
                return cluster["id"]

    @logwrap
    def add_syslog_server(self, cluster_id, host, port):
        # Update the cluster's editable attributes; in particular,
        # set an extra syslog server.
        attributes = self.get_cluster_attributes(cluster_id)
        attributes["editable"]["syslog"]["syslog_server"]["value"] = host
        attributes["editable"]["syslog"]["syslog_port"]["value"] = port
        self.update_cluster_attributes(cluster_id, attributes)

    @logwrap
    def clean_clusters(self):
        for cluster in self.list_clusters():
            self.delete_cluster(cluster["id"])

    @logwrap
    def _get_cluster_vlans(self, cluster_id):
        cluster_vlans = []
        for network in self.get_networks(cluster_id)['networks']:
            amount = network.get('amount', 1)
            cluster_vlans.extend(range(network['vlan_start'],
                                       network['vlan_start'] + amount))
        return cluster_vlans

    @logwrap
    @json_parse
    def get_notifications(self):
        return self.client.get("/api/notifications")

    @logwrap
    @json_parse
    def update_redhat_setup(self, data):
        return self.client.post("/api/redhat/setup", data=data)

    @logwrap
    @json_parse
    def generate_logs(self):
        return self.client.put("/api/logs/package")
Example #10
class TestNode(Base):
    def __init__(self, *args, **kwargs):
        super(TestNode, self).__init__(*args, **kwargs)
        self.remote = SSHClient()
        self.client = HTTPClient(
            url="http://%s:8000" % self.get_admin_node_ip()
        )
        self.ssh_user = "******"
        self.ssh_passwd = "r00tme"
        self.admin_host = self.get_admin_node_ip()
        self.remote.connect_ssh(
            self.admin_host,
            self.ssh_user,
            self.ssh_passwd
        )

    def setUp(self):
        pass

    def tearDown(self):
        self._wait_for_threads()
        try:
            self._stop_logserver()
        except AttributeError:
            pass

    def _start_logserver(self, handler=None):
        self._logserver_status = False
        if not handler:
            """
            We define log message handler in such a way
            assuming that if at least one message is received
            logging works fine.
            """

            def handler(message):
                self._logserver_status = True

        self.logserver = LogServer(
            address=self.get_host_node_ip(),
            port=5514
        )
        self.logserver.set_handler(handler)
        self.logserver.start()

    def _stop_logserver(self):
        self.logserver.stop()
        self._logserver_status = False

    def _status_logserver(self):
        return self._logserver_status

    def test_release_upload(self):
        self._upload_sample_release()

    def test_http_returns_200(self):
        resp = self.client.get("/")
        self.assertEquals(200, resp.getcode())

    def test_create_empty_cluster(self):
        self._create_cluster(name='empty')

    @snapshot_errors
    def test_node_deploy(self):
        self._revert_nodes()
        self._bootstrap_nodes(['slave1'])

    @snapshot_errors
    def test_updating_nodes_in_cluster(self):
        self._revert_nodes()
        cluster_id = self._create_cluster(name='empty')
        nodes = self._bootstrap_nodes(['slave1'])
        self._update_nodes_in_cluster(cluster_id, nodes)

    @snapshot_errors
    def test_one_node_provisioning(self):
        self._revert_nodes()
        self._clean_clusters()
        self._basic_provisioning('provision', {'controller': ['slave1']})

    @snapshot_errors
    def test_simple_cluster_flat(self):
        logging.info("Testing simple flat installation.")
        self._revert_nodes()
        cluster_name = 'simple_flat'
        nodes = {'controller': ['slave1'], 'compute': ['slave2']}
        cluster_id = self._basic_provisioning(cluster_name, nodes)
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(lambda: self._check_cluster_status(node['ip'], 5), timeout=300)

        logging.info("Verifying networks for simple flat installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        for vlan in vlans:
            for n in (slave, slave2):
                self._restore_vlan_in_ebtables(
                    n.interfaces[0].target_dev,
                    vlan,
                    False
                )
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network simple flat', 60 * 2)

    @snapshot_errors
    def test_simple_cluster_vlan(self):
        logging.info("Testing simple vlan installation.")
        self._revert_nodes()
        cluster_name = 'simple_vlan'
        nodes = {'controller': ['slave1'], 'compute': ['slave2']}
        self._create_cluster(name=cluster_name, net_manager="VlanManager")
        cluster_id = self._basic_provisioning(cluster_name, nodes)
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(lambda: self._check_cluster_status(node['ip'], 5, 8), timeout=300)

        logging.info("Verifying networks for simple vlan installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        for vlan in vlans:
            for n in (slave, slave2):
                self._restore_vlan_in_ebtables(
                    n.interfaces[0].target_dev,
                    vlan,
                    False
                )
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network simple vlan', 60 * 2)

    @snapshot_errors
    def test_ha_cluster_flat(self):
        logging.info("Testing ha flat installation.")
        self._revert_nodes()
        cluster_name = 'ha_flat'
        nodes = {
            'controller': ['slave1', 'slave2', 'slave3'],
            'compute': ['slave4', 'slave5']
        }
        cluster_id = self._basic_provisioning(cluster_name, nodes, 90 * 60)
        logging.info("Checking cluster status on slave1")
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(lambda: self._check_cluster_status(node['ip'], 13), timeout=300)

        logging.info("Verifying networks for ha flat installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        slave3 = ci.environment.node['slave3']
        slave4 = ci.environment.node['slave4']
        slave5 = ci.environment.node['slave5']
        for vlan in vlans:
            for n in (slave, slave2, slave3, slave4, slave5):
                self._restore_vlan_in_ebtables(
                    n.interfaces[0].target_dev,
                    vlan,
                    False
                )
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network ha flat', 60 * 2)

    @snapshot_errors
    def test_ha_cluster_vlan(self):
        logging.info("Testing ha vlan installation.")
        self._revert_nodes()
        cluster_name = 'ha_vlan'
        nodes = {
            'controller': ['slave1', 'slave2', 'slave3'],
            'compute': ['slave4', 'slave5']
        }
        self._create_cluster(name=cluster_name, net_manager="VlanManager")
        cluster_id = self._basic_provisioning(cluster_name, nodes, 90 * 60)
        slave = ci.environment.node['slave1']
        node = self._get_slave_node_by_devops_node(slave)
        wait(
            lambda: self._check_cluster_status(node['ip'], 13, 8),
            timeout=300
        )

        logging.info("Verifying networks for ha vlan installation.")
        vlans = self._get_cluster_vlans(cluster_id)
        slave2 = ci.environment.node['slave2']
        slave3 = ci.environment.node['slave3']
        slave4 = ci.environment.node['slave4']
        slave5 = ci.environment.node['slave5']
        for vlan in vlans:
            for n in (slave, slave2, slave3, slave4, slave5):
                self._restore_vlan_in_ebtables(
                    n.interfaces[0].target_dev,
                    vlan,
                    False
                )
        task = self._run_network_verify(cluster_id)
        self._task_wait(task, 'Verify network ha vlan', 60 * 2)

    @snapshot_errors
    def test_network_config(self):
        self._revert_nodes()
        self._clean_clusters()
        self._basic_provisioning('network_config', {'controller': ['slave1']})

        slave = ci.environment.node['slave1']
        keyfiles = ci.environment.node['admin'].metadata['keyfiles']
        node = self._get_slave_node_by_devops_node(slave)
        ctrl_ssh = SSHClient()
        ctrl_ssh.connect_ssh(node['ip'], 'root', key_filename=keyfiles)
        ifaces_fail = False
        for iface in node['network_data']:
            try:
                ifname = "%s.%s@%s" % (
                    iface['dev'], iface['vlan'], iface['dev']
                )
                ifname_short = "%s.%s" % (iface['dev'], iface['vlan'])
            except KeyError:
                # Untagged interface: no 'vlan' key, so the short name
                # is just the device name.
                ifname = iface['dev']
                ifname_short = ifname
            iface_data = ''.join(
                ctrl_ssh.execute(
                    '/sbin/ip addr show dev %s' % ifname_short
                )['stdout']
            )
            if iface_data.find(ifname) == -1:
                logging.error("Interface %s is absent" % ifname_short)
                ifaces_fail = True
            else:
                try:
                    if iface_data.find("inet %s" % iface['ip']) == -1:
                        logging.error(
                            "Interface %s does not have ip %s" % (
                                ifname_short, iface['ip']
                            )
                        )
                        ifaces_fail = True
                except KeyError:
                    if iface_data.find("inet ") != -1:
                        logging.error(
                            "Interface %s does have ip.  And it should not" %
                            ifname_short
                        )
                        ifaces_fail = True
                try:
                    if iface_data.find("brd %s" % iface['brd']) == -1:
                        logging.error(
                            "Interface %s does not have broadcast %s" % (
                                ifname_short, iface['brd']
                            )
                        )
                        ifaces_fail = True
                except KeyError:
                    pass
        self.assertEquals(ifaces_fail, False)

    @snapshot_errors
    def test_node_deletion(self):
        self._revert_nodes()
        cluster_name = 'node_deletion'
        node_name = 'slave1'
        nodes = {'controller': [node_name]}
        cluster_id = self._basic_provisioning(cluster_name, nodes)

        slave = ci.environment.node[node_name]
        node = self._get_slave_node_by_devops_node(slave)
        self.client.put("/api/nodes/%s/" % node['id'],
                        {'pending_deletion': True})
        task = self._launch_provisioning(cluster_id)
        self._task_wait(task, 'Node deletion', 60)

        timer = time.time()
        timeout = 5 * 60
        while True:
            response = self.client.get("/api/nodes/")
            nodes = json.loads(response.read())
            for n in nodes:
                if (n['mac'] == node['mac'] and n['status'] == 'discover'):
                    return
            if (time.time() - timer) > timeout:
                raise Exception("Bootstrap boot timeout expired!")
            time.sleep(5)

    @snapshot_errors
    def test_network_verify_with_blocked_vlan(self):
        self._revert_nodes()
        cluster_name = 'net_verify'
        cluster_id = self._create_cluster(name=cluster_name)
        node_names = ['slave1', 'slave2']
        nailgun_slave_nodes = self._bootstrap_nodes(node_names)
        devops_nodes = [ci.environment.node[n] for n in node_names]
        logging.info("Clear BROUTING table entries.")
        vlans = self._get_cluster_vlans(cluster_id)
        for vlan in vlans:
            for node in devops_nodes:
                for interface in node.interfaces:
                    self._restore_vlan_in_ebtables(interface.target_dev,
                                                   vlan, False)
        self._update_nodes_in_cluster(cluster_id, nailgun_slave_nodes)
        for node in devops_nodes:
            for interface in node.interfaces:
                self._block_vlan_in_ebtables(interface.target_dev, vlans[0])
        task = self._run_network_verify(cluster_id)
        task = self._task_wait(task,
                               'Verify network in cluster with blocked vlan',
                               60 * 2, True)
        self.assertEquals(task['status'], 'error')

    @snapshot_errors
    def test_multinic_bootstrap_booting(self):
        self._revert_nodes()
        slave = filter(lambda n: n.name != 'admin' and len(n.interfaces) > 1,
                       ci.environment.nodes)[0]
        nodename = slave.name
        logging.info("Using node %r with %d interfaces", nodename,
                     len(slave.interfaces))
        slave.stop()
        admin = ci.environment.node['admin']
        macs = [i.mac_address for i in slave.interfaces]
        logging.info("Block all MACs: %s.",
                     ', '.join(macs))
        for mac in macs:
            self._block_mac_in_ebtables(mac)
            self.addCleanup(self._restore_mac_in_ebtables, mac)
        for mac in macs:
            logging.info("Trying to boot node %r via interface with MAC %s...",
                         nodename, mac)
            self._restore_mac_in_ebtables(mac)
            slave.start()
            nailgun_slave = self._bootstrap_nodes([nodename])[0]
            self.assertEqual(mac.upper(), nailgun_slave['mac'].upper())
            slave.stop()
            admin.restore_snapshot('initial')
            self._block_mac_in_ebtables(mac)

    @staticmethod
    def _block_mac_in_ebtables(mac):
        try:
            subprocess.check_output(
                'sudo ebtables -t filter -A FORWARD -s %s -j DROP' % mac,
                stderr=subprocess.STDOUT,
                shell=True
            )
            logging.debug("MAC %s blocked via ebtables.", mac)
        except subprocess.CalledProcessError as e:
            raise Exception("Can't block mac %s via ebtables: %s",
                            mac, e.output)

    @staticmethod
    def _restore_mac_in_ebtables(mac):
        try:
            subprocess.check_output(
                'sudo ebtables -t filter -D FORWARD -s %s -j DROP' % mac,
                stderr=subprocess.STDOUT,
                shell=True
            )
            logging.debug("MAC %s unblocked via ebtables.", mac)
        except subprocess.CalledProcessError as e:
            logging.warn("Can't restore mac %s via ebtables: %s",
                         mac, e.output)

    def _block_vlan_in_ebtables(self, target_dev, vlan):
        try:
            subprocess.check_output(
                'sudo ebtables -t broute -A BROUTING -i %s -p 8021Q'
                ' --vlan-id %s -j DROP' % (
                    target_dev, vlan
                ),
                stderr=subprocess.STDOUT,
                shell=True
            )
            self.addCleanup(self._restore_vlan_in_ebtables,
                            target_dev, vlan)
            logging.debug("Vlan %s on interface %s blocked via ebtables.",
                          vlan, target_dev)
        except subprocess.CalledProcessError as e:
            raise Exception("Can't block vlan %s for interface %s"
                            " via ebtables: %s" %
                            (vlan, target_dev, e.output))

    def _get_cluster_vlans(self, cluster_id):
        resp = self.client.get(
            '/api/clusters/%d/network_configuration/' % cluster_id)
        self.assertEquals(200, resp.getcode())
        cluster_vlans = []
        networks = json.loads(resp.read())['networks']
        for n in networks:
            amount = n.get('amount', 1)
            cluster_vlans.extend(range(n['vlan_start'],
                                       n['vlan_start'] + amount))
        self.assertNotEqual(cluster_vlans, [])
        return cluster_vlans

    @staticmethod
    def _restore_vlan_in_ebtables(target_dev, vlan, log=True):
        try:
            subprocess.check_output(
                'sudo ebtables -t broute -D BROUTING -i %s -p 8021Q'
                ' --vlan-id %s -j DROP' % (
                    target_dev, vlan
                ),
                stderr=subprocess.STDOUT,
                shell=True
            )
            logging.debug("Vlan %s on interface %s unblocked via ebtables.",
                          vlan, target_dev)
        except subprocess.CalledProcessError as e:
            if log:
                logging.warn("Can't restore vlan %s for interface %s"
                             " via ebtables: %s" %
                             (vlan, target_dev, e.output))

    def _run_network_verify(self, cluster_id):
        logging.info(
            "Run network verify in cluster %d",
            cluster_id
        )
        resp = self.client.get(
            '/api/clusters/%d/network_configuration/' % cluster_id)
        self.assertEquals(200, resp.getcode())
        network_configuration = json.loads(resp.read())
        changes = self.client.put(
            '/api/clusters/%d/network_configuration/verify/' % cluster_id,
            network_configuration
        )
        self.assertEquals(200, changes.getcode())
        return json.loads(changes.read())

    def _basic_provisioning(self, cluster_name, nodes_dict,
                            task_timeout=30 * 60):
        self._start_logserver()
        self._clean_clusters()
        cluster_id = self._create_cluster(name=cluster_name)

        # Update the cluster's editable attributes; in particular,
        # set an extra syslog server.
        response = self.client.get(
            "/api/clusters/%s/attributes/" % cluster_id
        )
        attrs = json.loads(response.read())
        attrs["editable"]["syslog"]["syslog_server"]["value"] = \
            self.get_host_node_ip()
        attrs["editable"]["syslog"]["syslog_port"]["value"] = \
            self.logserver.bound_port()
        self.client.put(
            "/api/clusters/%s/attributes/" % cluster_id,
            attrs
        )

        node_names = []
        for role in nodes_dict:
            node_names += nodes_dict[role]
        try:
            if len(node_names) > 1:
                if len(nodes_dict['controller']) == 1:
                    self.client.put(
                        "/api/clusters/%s/" % cluster_id,
                        {"mode": "multinode"}
                    )
                if len(nodes_dict['controller']) > 1:
                    self.client.put(
                        "/api/clusters/%s/" % cluster_id,
                        {"mode": "ha"}
                    )
        except KeyError:
            pass

        nodes = self._bootstrap_nodes(node_names)

        for role in nodes_dict:
            for n in nodes_dict[role]:
                slave = ci.environment.node[n]
                node = self._get_slave_node_by_devops_node(slave)
                self.client.put(
                    "/api/nodes/%s/" % node['id'],
                    {"role": role, "pending_addition": True}
                )

        self._update_nodes_in_cluster(cluster_id, nodes)
        task = self._launch_provisioning(cluster_id)

        self._task_wait(task, 'Installation', task_timeout)

        logging.info("Checking role files on slave nodes")
        keyfiles = ci.environment.node['admin'].metadata['keyfiles']
        for role in nodes_dict:
            for n in nodes_dict[role]:
                logging.info("Checking /tmp/%s-file on %s" % (role, n))
                slave = ci.environment.node[n]
                node = self._get_slave_node_by_devops_node(slave)
                ctrl_ssh = SSHClient()
                for i in node['meta']['interfaces']:
                    ip = i.get('ip', None)
                    if ip:
                        logging.debug("Trying to connect to %s via ssh", ip)
                        try:
                            ctrl_ssh.connect_ssh(ip, 'root',
                                                 key_filename=keyfiles)
                        except Exception as e:
                            logging.debug("Unable to connect to %s: %s", ip,
                                          str(e))
                            continue
                        ret = ctrl_ssh.execute('test -f /tmp/%s-file' % role)
                        self.assertEquals(ret['exit_status'], 0,
                                          ("File '/tmp/%s-file' not found" %
                                           role))
                        ctrl_ssh.disconnect()
                        break
                    else:
                        i_name = i.get('name') or i.get('mac') or str(i)
                        logging.debug("Interface doesn't have an IP: %r",
                                      i_name)
                self.assertNotEqual(ip, None, "Unable to fing a valid IP"
                                              " for node %s" % n)
        return cluster_id