Example 1
 def update_node_interfaces(self, node_id):
     interfaces = self.download_node_interfaces(node_id)
     logger.debug("interfaces we get {}".format(interfaces))
     assigned_networks = {
         iface_alias('eth0'): [{
             'id': 1,
             'name': 'fuelweb_admin'
         }],
         iface_alias('eth1'): [{
             'id': 2,
             'name': 'public'
         }],
         iface_alias('eth2'): [{
             'id': 3,
             'name': 'management'
         }],
         iface_alias('eth3'): [{
             'id': 5,
             'name': 'private'
         }],
         iface_alias('eth4'): [{
             'id': 4,
             'name': 'storage'
         }],
     }
     for interface in interfaces:
         name = interface['name']
         net_to_assign = assigned_networks.get(name, None)
         if net_to_assign:
             interface['assigned_networks'] = net_to_assign
     logger.debug("interfaces after update {}".format(interfaces))
     self.upload_node_interfaces(node_id, interfaces)
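
Every example on this page leans on the iface_alias helper to translate a logical name such as 'eth0' into whatever interface name the test environment actually exposes. As a rough illustration only, a minimal sketch of such a helper is shown below; the IFACE_MAP dict and the fall-back-to-input behaviour are assumptions for illustration, not the framework's actual implementation.

# Hypothetical sketch of an iface_alias-style helper.
# IFACE_MAP is an assumed, illustrative mapping; a real environment would
# derive it from its own settings.
IFACE_MAP = {
    'eth0': 'enp0s3',
    'eth1': 'enp0s4',
    'eth2': 'enp0s5',
}

def iface_alias(name):
    """Return the environment-specific name for a logical interface,
    falling back to the logical name itself when no mapping exists."""
    return IFACE_MAP.get(name, name)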
Example 2
    def test_untagged_networks_negative(self):
        """Verify network verification fails with untagged network on eth0

        Scenario:
            1. Create cluster in ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Split networks on existing physical interfaces
            6. Remove VLAN tagging from networks which are on eth0
            7. Run network verification (assert it fails)
            8. Start cluster deployment (assert it fails)

        Duration 30m

        """
        fuel_web = self.manager.fuel_web
        vlan_turn_off = {'vlan_start': None}
        interfaces = {
            iface_alias('eth0'): ["fixed"],
            iface_alias('eth1'): ["public"],
            iface_alias('eth2'): ["management", "storage"],
            iface_alias('eth3'): []
        }

        self.manager.show_step(1)
        cluster_id = fuel_web.create_cluster(
            name=self.cluster_config['name'],
            mode=self.cluster_config['mode'],
        )
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        fuel_web.update_nodes(
            cluster_id,
            self.cluster_config['nodes']
        )

        self.manager.show_step(5)
        nets = fuel_web.client.get_networks(cluster_id)['networks']
        nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            fuel_web.update_node_networks(node['id'], interfaces)

        self.manager.show_step(6)
        # select networks that will be untagged:
        for net in nets:
            net.update(vlan_turn_off)

        # stop using VLANs:
        fuel_web.client.update_network(cluster_id, networks=nets)

        self.manager.show_step(7)
        # run network check:
        fuel_web.verify_network(cluster_id, success=False)

        self.manager.show_step(8)
        # deploy cluster:
        task = fuel_web.deploy_cluster(cluster_id)
        fuel_web.assert_task_failed(task)
Example 3
    def untagged_networks_negative(self):
        """Verify network verification fails with untagged network on eth0

        Test disabled and moved to fuel_tests suite:
            fuel_tests.test.test_l2_network_config.TestL2NetworkConfig

        Scenario:
            1. Create cluster in ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Split networks on existing physical interfaces
            5. Remove VLAN tagging from networks which are on eth0
            6. Run network verification (assert it fails)
            7. Start cluster deployment (assert it fails)

        Duration 30m

        """
        # pylint: disable=W0101
        warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
        raise SkipTest("Test disabled and move to fuel_tests suite")

        self.env.revert_snapshot("ready_with_3_slaves")

        vlan_turn_off = {'vlan_start': None}
        interfaces = {
            iface_alias('eth0'): ["fixed"],
            iface_alias('eth1'): ["public"],
            iface_alias('eth2'): ["management", "storage"],
            iface_alias('eth3'): []
        }

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE)
        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller'],
            'slave-02': ['compute']
        })

        nets = self.fuel_web.client.get_networks(cluster_id)['networks']
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(node['id'], interfaces)

        # select networks that will be untagged:
        for net in nets:
            net.update(vlan_turn_off)

        # stop using VLANs:
        self.fuel_web.client.update_network(cluster_id, networks=nets)

        # run network check:
        self.fuel_web.verify_network(cluster_id, success=False)

        # deploy cluster:
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_failed(task)
    def deploy_node_multiple_interfaces(self):
        """Deploy cluster with networks allocated on different interfaces

        Test disabled and moved to fuel_tests suite:
            fuel_tests.test.test_l2_network_config.TestL2NetworkConfig

        Scenario:
            1. Create cluster in Ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Split networks on existing physical interfaces
            6. Deploy the cluster
            7. Verify network configuration on each deployed node
            8. Run network verification

        Duration 25m
        Snapshot: deploy_node_multiple_interfaces

        """
        # pylint: disable=W0101
        warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
        raise SkipTest("Test disabled and move to fuel_tests suite")

        self.env.revert_snapshot("ready_with_3_slaves")

        interfaces_dict = {
            iface_alias('eth0'): ['fuelweb_admin'],
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['storage'],
            iface_alias('eth3'): ['private'],
            iface_alias('eth4'): ['management'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(node['id'], interfaces_dict)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.env.make_snapshot("deploy_node_multiple_interfaces", is_make=True)
Example 5
    def untagged_networks_negative(self):
        """Verify network verification fails with untagged network on eth0

        Scenario:
            1. Create cluster in ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Split networks on existing physical interfaces
            5. Remove VLAN tagging from networks which are on eth0
            6. Run network verification (assert it fails)
            7. Start cluster deployment (assert it fails)

        Duration 30m

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        vlan_turn_off = {'vlan_start': None}
        interfaces = {
            iface_alias('eth0'): ["fixed"],
            iface_alias('eth1'): ["public"],
            iface_alias('eth2'): ["management", "storage"],
            iface_alias('eth3'): []
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute']
            }
        )

        nets = self.fuel_web.client.get_networks(cluster_id)['networks']
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(node['id'], interfaces)

        # select networks that will be untagged:
        for net in nets:
            net.update(vlan_turn_off)

        # stop using VLANs:
        self.fuel_web.client.update_network(cluster_id, networks=nets)

        # run network check:
        self.fuel_web.verify_network(cluster_id, success=False)

        # deploy cluster:
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_failed(task)
Example 6
    def deploy_node_multiple_interfaces(self):
        """Deploy cluster with networks allocated on different interfaces

        Test disabled and moved to fuel_tests suite:
            fuel_tests.test.test_l2_network_config.TestL2NetworkConfig

        Scenario:
            1. Create cluster in Ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Split networks on existing physical interfaces
            6. Deploy the cluster
            7. Verify network configuration on each deployed node
            8. Run network verification

        Duration 25m
        Snapshot: deploy_node_multiple_interfaces

        """
        # pylint: disable=W0101
        warn("Test disabled and move to fuel_tests suite", DeprecationWarning)
        raise SkipTest("Test disabled and move to fuel_tests suite")

        self.env.revert_snapshot("ready_with_3_slaves")

        interfaces_dict = {
            iface_alias('eth0'): ['fuelweb_admin'],
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['storage'],
            iface_alias('eth3'): ['private'],
            iface_alias('eth4'): ['management'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(node['id'], interfaces_dict)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.env.make_snapshot("deploy_node_multiple_interfaces", is_make=True)
Example 7
    def check_can_not_enable_dpdk_on_non_dedicated_iface(self):
        """Check can not enable DPDK on non-dedicated interface

        Scenario:
            1. Create new environment with VLAN segmentation for Neutron
            2. Set KVM as Hypervisor
            3. Add controller and compute nodes
            4. Configure HugePages for compute nodes
            5. Add private and storage networks to interface
               and try enable DPDK mode
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": "vlan"
            }
        )

        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })

        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'], role_status='pending_roles')[0]

        self.show_step(4)
        self.setup_hugepages(compute, hp_2mb=256, hp_dpdk_mb=128)

        self.show_step(5)
        assigned_networks = {
            settings.iface_alias('eth0'): ['fuelweb_admin'],
            settings.iface_alias('eth1'): ['public'],
            settings.iface_alias('eth2'): ['management'],
            settings.iface_alias('eth3'): ['private', 'storage'],
            settings.iface_alias('eth4'): []
        }
        self.fuel_web.update_node_networks(compute['id'],
                                           interfaces_dict=assigned_networks)
        assert_raises(
            exceptions.BadRequest,
            self.enable_dpdk, compute,
            forceEnable=True)
Example 8
    def test_deploy_node_multiple_interfaces(self):
        """Deploy cluster with networks allocated on different interfaces

        Scenario:
            1. Create cluster in Ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Split networks on existing physical interfaces
            6. Deploy the cluster
            7. Verify network configuration on each deployed node
            8. Run network verification

        Duration 25m
        Snapshot: deploy_node_multiple_interfaces

        """
        # self.env.revert_snapshot("ready_with_3_slaves")

        fuel_web = self.manager.fuel_web
        interfaces_dict = {
            iface_alias('eth0'): ['fuelweb_admin'],
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['storage'],
            iface_alias('eth3'): ['private'],
            iface_alias('eth4'): ['management'],
        }
        self.manager.show_step(1)
        cluster_id = fuel_web.create_cluster(
            name=self.cluster_config['name'],
            mode=self.cluster_config['mode'],
        )
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        fuel_web.update_nodes(
            cluster_id,
            self.cluster_config['nodes']
        )
        self.manager.show_step(5)
        nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            fuel_web.update_node_networks(node['id'], interfaces_dict)

        self.manager.show_step(6)
        fuel_web.deploy_cluster_wait(cluster_id)

        self.manager.show_step(7)
        fuel_web.verify_network(cluster_id)
    def test_deploy_node_multiple_interfaces(self):
        """Deploy cluster with networks allocated on different interfaces

        Scenario:
            1. Create cluster in Ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Split networks on existing physical interfaces
            6. Deploy the cluster
            7. Verify network configuration on each deployed node
            8. Run network verification

        Duration 25m
        Snapshot: deploy_node_multiple_interfaces

        """
        # self.env.revert_snapshot("ready_with_3_slaves")

        fuel_web = self.manager.fuel_web
        interfaces_dict = {
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['storage'],
            iface_alias('eth3'): ['private'],
            iface_alias('eth4'): ['management'],
        }
        self.manager.show_step(1)
        cluster_id = fuel_web.create_cluster(
            name=self.cluster_config['name'],
            mode=self.cluster_config['mode'],
        )
        self.manager.show_step(2)
        self.manager.show_step(3)
        self.manager.show_step(4)
        fuel_web.update_nodes(
            cluster_id,
            self.cluster_config['nodes']
        )
        self.manager.show_step(5)
        nailgun_nodes = fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            fuel_web.update_node_networks(node['id'], interfaces_dict)

        self.manager.show_step(6)
        fuel_web.deploy_cluster_wait(cluster_id)

        self.manager.show_step(7)
        fuel_web.verify_network(cluster_id)
Example 10
 def update_node_interfaces(self, node_id):
     interfaces = self.download_node_interfaces(node_id)
     logger.debug("interfaces we get {}".format(interfaces))
     assigned_networks = {
         iface_alias('eth0'): [{'id': 1, 'name': 'fuelweb_admin'}],
         iface_alias('eth1'): [{'id': 2, 'name': 'public'}],
         iface_alias('eth2'): [{'id': 3, 'name': 'management'}],
         iface_alias('eth3'): [{'id': 5, 'name': 'private'}],
         iface_alias('eth4'): [{'id': 4, 'name': 'storage'}],
     }
     for interface in interfaces:
         name = interface['name']
         net_to_assign = assigned_networks.get(name, None)
         if net_to_assign:
             interface['assigned_networks'] = net_to_assign
     logger.debug("interfaces after update {}".format(interfaces))
     self.upload_node_interfaces(node_id, interfaces)
 def update_node_interfaces(self, node_id):
     interfaces = self.download_node_interfaces(node_id)
     logger.debug("interfaces we get {}".format(interfaces))
     assigned_networks = {
         iface_alias("eth0"): [{"id": 1, "name": "fuelweb_admin"}],
         iface_alias("eth1"): [{"id": 2, "name": "public"}],
         iface_alias("eth2"): [{"id": 3, "name": "management"}],
         iface_alias("eth3"): [{"id": 5, "name": "private"}],
         iface_alias("eth4"): [{"id": 4, "name": "storage"}],
     }
     for interface in interfaces:
         name = interface["name"]
         net_to_assign = assigned_networks.get(name, None)
         if net_to_assign:
             interface["assigned_networks"] = net_to_assign
     logger.debug("interfaces after update {}".format(interfaces))
     self.upload_node_interfaces(node_id, interfaces)
Example 12
    def dhcrelay_check(self):
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        iface = iface_alias("eth0")
        command = "dhcpcheck discover " "--ifaces {iface} " "--repeat 3 " "--timeout 10".format(iface=iface)

        out = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=command)["stdout"]

        assert_true(self.get_admin_node_ip() in "".join(out), "dhcpcheck doesn't discover master ip")
Example 13
    def update_vm_node_interfaces(self, obj, cluster_id):
        """Update network interfaces for node."""
        assigned_networks = {
            iface_alias('eth0'): ['fuelweb_admin', 'storage', 'management'],
            iface_alias('eth1'): ['public'],
            iface_alias('eth3'): ['private'],
        }
        logger.info('Assigned networks are: {}'.format(str(assigned_networks)))

        nailgun_nodes = obj.fuel_web.client.list_cluster_nodes(cluster_id)
        baremetal_macs = self.conf['target_macs']
        for node in nailgun_nodes:
            if not node['status'] == 'discover':
                # Skip not discovered nodes
                continue
            if node['mac'] in baremetal_macs:
                # Skip baremetal nodes
                continue
            obj.fuel_web.update_node_networks(node['id'], assigned_networks)
Example 14
    def get_keys(self,
                 node,
                 custom=None,
                 build_images=None,
                 iso_connect_as='cdrom'):
        params = {
            'device_label': settings.ISO_LABEL,
            'iface': iface_alias('eth0'),
            'ip': node.get_ip_address_by_network_name('admin'),
            'mask': self.d_env.get_network(name='admin').ip.netmask,
            'gw': self.d_env.get_default_gw(),
            'hostname': ''.join((settings.FUEL_MASTER_HOSTNAME,
                                 settings.DNS_SUFFIX)),
            'nat_interface': '',
            'nameserver': settings.DNS,
            'showmenu': 'yes' if settings.SHOW_FUELMENU else 'no',
            'wait_for_external_config': 'yes',
            'build_images': '1' if build_images else '0',
            'MASTER_NODE_EXTRA_PACKAGES': settings.MASTER_NODE_EXTRA_PACKAGES
        }
        # TODO(akostrikov) add tests for menu items/kernel parameters
        # TODO(akostrikov) refactor it.
        if iso_connect_as == 'usb':
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n")
        else:  # cdrom is default
            keys = ("<Wait>\n" "<Wait>\n" "<Wait>\n")

        keys += (
            "<Esc>\n"
            "<Wait>\n"
            "vmlinuz initrd=initrd.img"
            " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg"
            " inst.repo=cdrom:LABEL=%(device_label)s:/"
            " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
            ":%(iface)s:off::: nameserver=%(nameserver)s"
            " showmenu=%(showmenu)s\n"
            " wait_for_external_config=%(wait_for_external_config)s"
            " build_images=%(build_images)s\n"
            " MASTER_NODE_EXTRA_PACKAGES='%(MASTER_NODE_EXTRA_PACKAGES)s'\n"
            " <Enter>\n") % params
        return keys
Example 15
    def setup_centos_master(self):
        """Create environment, bootstrap centos_master
        and install fuel services

        Snapshot "empty_centos"

            1. bootstrap_centos_master
            2. Download fuel_release from remote repository
            3. Install fuel_setup package
            4. Install Fuel services by executing bootstrap_admin_node.sh
            5. Check Fuel services


        """
        self.check_run("empty_centos")
        self.show_step(1, initialize=True)
        cloud_image_settings_path = os.path.join(
            os.path.dirname(fuelweb_test.__file__),
            'cloud_image_settings/cloud_settings.iso')

        admin_net_object = self.env.d_env.get_network(
            name=self.env.d_env.admin_net)
        admin_network = admin_net_object.ip.network
        admin_netmask = admin_net_object.ip.netmask
        admin_ip = str(self.env.d_env.nodes(
        ).admin.get_ip_address_by_network_name(self.env.d_env.admin_net))
        interface_name = settings.iface_alias("eth0")
        gateway = self.env.d_env.router()
        dns = settings.DNS
        dns_ext = ''.join(settings.EXTERNAL_DNS)
        hostname = ''.join((settings.FUEL_MASTER_HOSTNAME,
                            settings.DNS_SUFFIX))
        user = settings.SSH_FUEL_CREDENTIALS['login']
        password = settings.SSH_FUEL_CREDENTIALS['password']
        generate_cloud_image_settings(cloud_image_settings_path, admin_network,
                                      interface_name, admin_ip, admin_netmask,
                                      gateway, dns, dns_ext,
                                      hostname, user, password)

        with TimeStat("bootstrap_centos_node", is_uniq=True):
            admin = self.env.d_env.nodes().admin
            logger.info(cloud_image_settings_path)
            admin.disk_devices.get(
                device='cdrom').volume.upload(cloud_image_settings_path)
            self.env.d_env.start([admin])
            logger.info("Waiting for Centos node to start up")
            wait(lambda: admin.driver.node_active(admin), 60)
            logger.info("Waiting for Centos node ssh ready")
            self.env.wait_for_provisioning()

        self.centos_setup_fuel(hostname)

        self.env.make_snapshot("empty", is_make=True)
Example 16
    def __init__(self):
        super(BondingTestDPDK, self).__init__()
        self.BOND_CONFIG = [
            {
                'mac': None,
                'mode': 'active-backup',
                'name': 'bond0',
                'slaves': [
                    {'name': iface_alias('eth3')},
                    {'name': iface_alias('eth2')}
                ],
                'state': None,
                'type': 'bond',
                'assigned_networks': [],
                'interface_properties': {'dpdk': {'available': True}},
                'bond_properties': {'mode': 'active-backup',
                                    'type__': 'linux'},
            },
            {
                'mac': None,
                'mode': 'active-backup',
                'name': 'bond1',
                'slaves': [
                    {'name': iface_alias('eth1')},
                    {'name': iface_alias('eth0')}
                ],
                'state': None,
                'type': 'bond',
                'assigned_networks': [],
                'interface_properties': {'dpdk': {'available': True}},
                'bond_properties': {'mode': 'active-backup',
                                    'type__': 'linux'},
            },
            {
                'mac': None,
                'mode': 'active-backup',
                'name': 'bond2',
                'slaves': [
                    {'name': iface_alias('eth5')},
                    {'name': iface_alias('eth4')},
                ],
                'state': None,
                'type': 'bond',
                'assigned_networks': [],
                'interface_properties': {'dpdk': {'available': True}},
                'bond_properties': {'mode': 'active-backup',
                                    'type__': 'linux'},
            },
        ]

        self.INTERFACES = {
            'bond0': [
                'public',
                'management',
                'storage',
            ],
            'bond1': ['fuelweb_admin'],
            'bond2': ['private'],
        }
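
The class-level BOND_CONFIG and INTERFACES structures above are presumably applied per node in the same way as the plain interface dicts in the other examples. A minimal sketch of that step is given below; the apply_bonding name and its parameters are hypothetical, and the sketch assumes only the interfaces_dict/raw_data keywords of update_node_networks that the Contrail bonding example further down this page uses.

# Illustrative sketch: push a bond layout to every node of a cluster.
# apply_bonding is a hypothetical helper; it assumes update_node_networks
# accepts interfaces_dict and raw_data as shown in the bonding example below.
def apply_bonding(fuel_web, cluster_id, bond_config, interfaces):
    nodes = fuel_web.client.list_cluster_nodes(cluster_id)
    for node in nodes:
        fuel_web.update_node_networks(
            node['id'],
            interfaces_dict=interfaces,
            raw_data=bond_config)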
Example 17
    def dhcrelay_check(self):
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        iface = iface_alias('eth0')
        command = "dhcpcheck discover " \
                  "--ifaces {iface} " \
                  "--repeat 3 " \
                  "--timeout 10".format(iface=iface)

        out = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                       cmd=command)['stdout']

        assert_true(self.get_admin_node_ip() in "".join(out),
                    "dhcpcheck doesn't discover master ip")
Example 18
    def get_keys(self, node, custom=None, build_images=None,
                 iso_connect_as='cdrom'):
        params = {
            'device_label': settings.ISO_LABEL,
            'iface': iface_alias('eth0'),
            'ip': node.get_ip_address_by_network_name(
                self.d_env.admin_net),
            'mask': self.d_env.get_network(
                name=self.d_env.admin_net).ip.netmask,
            'gw': self.d_env.router(),
            'hostname': ''.join((settings.FUEL_MASTER_HOSTNAME,
                                 settings.DNS_SUFFIX)),
            'nat_interface': self.d_env.nat_interface,
            'nameserver': settings.DNS,
            'showmenu': 'yes' if settings.SHOW_FUELMENU else 'no',
            'wait_for_external_config': 'yes',
            'build_images': '1' if build_images else '0',
            'MASTER_NODE_EXTRA_PACKAGES': settings.MASTER_NODE_EXTRA_PACKAGES
        }
        # TODO(akostrikov) add tests for menu items/kernel parameters
        # TODO(akostrikov) refactor it.
        if iso_connect_as == 'usb':
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n"
            )
        else:  # cdrom is default
            keys = (
                "<Wait>\n"
                "<Wait>\n"
                "<Wait>\n"
            )

        keys += (
            "<Esc>\n"
            "<Wait>\n"
            "vmlinuz initrd=initrd.img"
            " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg"
            " inst.repo=cdrom:LABEL=%(device_label)s:/"
            " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
            ":%(iface)s:off::: nameserver=%(nameserver)s"
            " showmenu=%(showmenu)s\n"
            " wait_for_external_config=%(wait_for_external_config)s"
            " build_images=%(build_images)s\n"
            " MASTER_NODE_EXTRA_PACKAGES='%(MASTER_NODE_EXTRA_PACKAGES)s'\n"
            " <Enter>\n"
        ) % params
        return keys
Example 19
    def __init__(self):
        self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG = {
            'mac': None,
            'mode': 'active-backup',
            'state': None,
            'type': 'bond',
            'assigned_networks': [],
            'bond_properties': {
                'mode': 'active-backup',
                'type__': 'linux'
            }
        }

        self.TEMPLATE_NEW_SERIALIZATION_BOND_CONFIG = {
            'mac': None,
            'mode': 'active-backup',
            'state': None,
            'type': 'bond',
            'assigned_networks': [],
            'attributes': {
                'type__': {
                    'type': 'hidden',
                    'value': 'linux'
                }
            }
        }

        self.INTERFACES = {
            'bond0': ['public', 'management', 'storage', 'private'],
            'bond1': ['fuelweb_admin']
        }
        self.BOND_LIST = [
            {
                'name': 'bond0',
                'slaves': [
                    {'name': iface_alias('eth5')},
                    {'name': iface_alias('eth4')},
                    {'name': iface_alias('eth3')},
                    {'name': iface_alias('eth2')}
                ]
            },
            {
                'name': 'bond1',
                'slaves': [
                    {'name': iface_alias('eth1')},
                    {'name': iface_alias('eth0')}
                ]
            }
        ]
        self.BOND_ATTR = {}
        super(BondingTest, self).__init__()
        self.__cluster_id = None
        self.__bond_config = None
Example 20
    def get_keys(self, node, custom=None, build_images=None, iso_connect_as="cdrom"):
        params = {
            "device_label": settings.ISO_LABEL,
            "iface": iface_alias("eth0"),
            "ip": node.get_ip_address_by_network_name(self.d_env.admin_net),
            "mask": self.d_env.get_network(name=self.d_env.admin_net).ip.netmask,
            "gw": self.d_env.router(),
            "hostname": "".join((settings.FUEL_MASTER_HOSTNAME, settings.DNS_SUFFIX)),
            "nat_interface": self.d_env.nat_interface,
            "nameserver": settings.DNS,
            "showmenu": "yes" if settings.SHOW_FUELMENU else "no",
            "wait_for_external_config": "yes",
            "build_images": "1" if build_images else "0",
            "MASTER_NODE_EXTRA_PACKAGES": settings.MASTER_NODE_EXTRA_PACKAGES,
        }
        # TODO(akostrikov) add tests for menu items/kernel parameters
        # TODO(akostrikov) refactor it.
        if iso_connect_as == "usb":
            keys = "<Wait>\n" "<F12>\n" "2\n"  # USB boot uses boot_menu=yes for master node
        else:  # cdrom is default
            keys = "<Wait>\n" "<Wait>\n" "<Wait>\n"

        keys += (
            "<Esc>\n"
            "<Wait>\n"
            "vmlinuz initrd=initrd.img"
            " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg"
            " inst.repo=cdrom:LABEL=%(device_label)s:/"
            " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
            ":%(iface)s:off::: nameserver=%(nameserver)s"
            " showmenu=%(showmenu)s\n"
            " wait_for_external_config=%(wait_for_external_config)s"
            " build_images=%(build_images)s\n"
            " MASTER_NODE_EXTRA_PACKAGES='%(MASTER_NODE_EXTRA_PACKAGES)s'\n"
            " <Enter>\n"
        ) % params
        return keys
Example 21
    def __init__(self):
        self.BOND_CONFIG = [
            {
                'mac': None,
                'mode': 'active-backup',
                'name': 'bond0',
                'slaves': [
                    {'name': iface_alias('eth5')},
                    {'name': iface_alias('eth4')},
                    {'name': iface_alias('eth3')},
                    {'name': iface_alias('eth2')}
                ],
                'state': None,
                'type': 'bond',
                'assigned_networks': []
            },
            {
                'mac': None,
                'mode': 'active-backup',
                'name': 'bond1',
                'slaves': [
                    {'name': iface_alias('eth1')},
                    {'name': iface_alias('eth0')}
                ],
                'state': None,
                'type': 'bond',
                'assigned_networks': []
            }
        ]

        self.INTERFACES = {
            'bond0': [
                'public',
                'management',
                'storage',
                'private'
            ],
            'bond1': ['fuelweb_admin']
        }
        super(BondingTest, self).__init__()
    def ha_ceilometer_untag_network(self):
        """Deployment with 3 controllers, NeutronVLAN and untag network,
           with Ceilometer

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose Ceilometer
            4. Add 3 controller
            5. Add 1 compute
            6. Add 3 mongo+cinder
            7. Move Storage network to eth1 and specify vlan start
            8. Move Management network to eth2 and untag it
            9. Verify networks
            10. Deploy the environment
            11. Verify networks
            12. Run OSTF tests

        Duration 180m
        Snapshot ha_ceilometer_untag_network
        """
        self.env.revert_snapshot("ready_with_9_slaves")
        data = {
            "ceilometer": True,
            "tenant": "mongomultirole",
            "user": "******",
            "password": "******",
            "net_provider": "neutron",
            "net_segment_type": settings.NEUTRON_SEGMENT["vlan"],
        }

        self.show_step(1, initialize=True)
        self.show_step(2)
        self.show_step(3)
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__, settings=data)

        self.show_step(4)
        self.show_step(5)
        self.show_step(6)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["mongo", "cinder"],
                "slave-06": ["mongo", "cinder"],
                "slave-07": ["mongo", "cinder"],
            },
        )
        self.show_step(7)
        self.show_step(8)
        vlan_turn_on = {"vlan_start": 102}
        interfaces = {
            iface_alias("eth0"): ["private"],
            iface_alias("eth1"): ["storage", "public"],
            iface_alias("eth2"): ["management"],
            iface_alias("eth3"): [],
            iface_alias("eth4"): [],
        }

        nets = self.fuel_web.client.get_networks(cluster_id)["networks"]
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(node["id"], interfaces)

        for net in nets:
            if net["name"] == "storage":
                net.update(vlan_turn_on)

        self.fuel_web.client.update_network(cluster_id, networks=nets)

        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(10)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        self.fuel_web.run_ostf(cluster_id)
        self.env.make_snapshot("ha_ceilometer_untag_network")
    def check_bonding_with_contrail(self):
        """Verify bonding with Contrail Plugin

        Scenario:
            1. Revert snapshot "ready_with_5_slaves"
            2. Create cluster
            3. Add 3 nodes with Operating system role,
               1 node with controller role and 1 node with compute role
            4. Enable Contrail plugin
            5. Setup bonding for management and storage interfaces
            6. Deploy cluster with plugin
            7. Run OSTF tests

        Duration 140 min

        """
        self._prepare_contrail_plugin(slaves=5)

        # create cluster: 3 nodes with Operating system role,
        # 1 node with controller and 1 node with compute roles
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['base-os'],
                'slave-02': ['base-os'],
                'slave-03': ['base-os'],
                'slave-04': ['controller'],
                'slave-05': ['compute']
            },
            custom_names={
                'slave-01': 'contrail-1',
                'slave-02': 'contrail-2',
                'slave-03': 'contrail-3'
            }
        )
        raw_data = [{
            'mac': None,
            'mode': 'active-backup',
            'name': 'bond0',
            'slaves': [
                {'name': iface_alias('eth4')},
                {'name': iface_alias('eth2')},
            ],
            'state': None,
            'type': 'bond',
            'assigned_networks': []
        }, ]

        interfaces = {
            iface_alias('eth0'): ['fuelweb_admin'],
            iface_alias('eth1'): ['public'],
            iface_alias('eth3'): ['private'],
            'bond0': [
                'management',
                'storage',
            ]
        }

        cluster_nodes = \
            self.fuel_web.client.list_cluster_nodes(self.cluster_id)
        for node in cluster_nodes:
            self.fuel_web.update_node_networks(
                node['id'], interfaces_dict=interfaces,
                raw_data=raw_data
            )

        # enable plugin in contrail settings
        self._activate_plugin()

        self.fuel_web.deploy_cluster_wait(self.cluster_id,
                                          check_services=False)

        # create net and subnet
        self._create_net_subnet(self.cluster_id)

        # TODO
        # Tests using north-south connectivity are expected to fail because
        # they require additional gateway nodes, and specific contrail
        # settings. This mark is a workaround until it's verified
        # and tested manually.
        # Also workaround according to bug 1457515
        # When it will be done 'should_fail=3' and
        # 'failed_test_name' parameter should be removed.

        self.fuel_web.run_ostf(
            cluster_id=self.cluster_id,
            should_fail=3,
            failed_test_name=[('Check network connectivity '
                               'from instance via floating IP'),
                              'Launch instance with file injection',
                              'Check that required services are running']
        )
Example 24
    def __init__(self):
        super(BondingTestDPDK, self).__init__()
        self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG['interface_properties'] = {
            'dpdk': {
                'available': True
            }
        }
        self.BOND_LIST = [
            {
                'name': 'bond0',
                'slaves': [
                    {'name': iface_alias('eth3')},
                    {'name': iface_alias('eth2')}
                ],
            },
            {
                'name': 'bond1',
                'slaves': [
                    {'name': iface_alias('eth1')},
                    {'name': iface_alias('eth0')}
                ],
            },
            {
                'name': 'bond2',
                'slaves': [
                    {'name': iface_alias('eth5')},
                    {'name': iface_alias('eth4')},
                ],
            },
        ]

        self.INTERFACES = {
            'bond0': [
                'public',
                'management',
                'storage',
            ],
            'bond1': ['fuelweb_admin'],
            'bond2': ['private'],
        }

        self.BOND_ATTR = {
            'dpdk': {
                'enabled': {
                    'type': 'checkbox',
                    'value': False,
                    'weight': 10,
                    'label': 'DPDK enabled'
                },
                'metadata': {
                    'weight': 40,
                    'label': 'DPDK'
                }
            }
        }
Example 25
    def jumbo_frames_neutron_vxlan(self):
        """Deploy cluster in ha mode with 3 controllers and Neutron VXLAN

        Scenario:
            1. Revert snapshot ready_with_5_slaves_jumbo_frames
            2. Create cluster with neutron VXLAN
            3. Add 3 node with controller role
            4. Add 2 nodes with compute role
            5. Deploy the cluster
            6. Run network verification
            7. Run OSTF
            8. Run MTU size check

        Duration 120m
        Snapshot ready_jumbo_frames_neutron_vxlan

        """
        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=CONF.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": CONF.NEUTRON_SEGMENT['tun'],
            }
        )

        interfaces = {
            iface_alias('eth0'): ['fuelweb_admin'],
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['management'],
            iface_alias('eth3'): ['private'],
            iface_alias('eth4'): ['storage'],
        }

        interfaces_update = [{
            'name': iface_alias('eth3'),
            'interface_properties': {
                'mtu': 9000,
                'disable_offloading': False
            },
        }]

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            }
        )

        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in slave_nodes:
            self.fuel_web.update_node_networks(
                node['id'], interfaces,
                override_ifaces_params=interfaces_update)

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        for node_name in ['slave-01', 'slave-02', 'slave-03',
                          'slave-04', 'slave-05']:
            node = self.fuel_web.get_nailgun_node_by_name(node_name)
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                asserts.assert_true(
                    self.check_node_iface_mtu(remote,
                                              iface_alias('eth3'),
                                              9000),
                    "MTU on {0} is not 9000. Actual value: {1}"
                        .format(remote.host,
                                self.get_node_iface(remote,
                                                    iface_alias('eth3'))))

        self.check_mtu_size_between_instances(mtu_offset=50)
        self.env.make_snapshot("ready_jumbo_frames_neutron_vxlan")
Example 26
    def offloading_neutron_vlan(self):
        """Deploy cluster with specific offload modes and neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Run network verification
            8. Verify offloading modes on nodes
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": 'vlan',
            }
        )

        interfaces = {
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['private'],
            iface_alias('eth3'): ['management'],
            iface_alias('eth4'): ['storage'],
        }

        offloading_modes = [
            {
                'name': iface_alias('eth1'),
                'offloading_modes': [
                    {'state': 'true', 'name': 'rx-vlan-offload', 'sub': []},
                    {'state': 'true', 'name': 'tx-vlan-offload', 'sub': []}
                ]
            },
            {
                'name': iface_alias('eth2'),
                'offloading_modes': [
                    {'state': 'false', 'name': 'large-receive-offload', 'sub': []}
                ]
            }
        ]

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )

        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        self.show_step(4)
        for node in slave_nodes:
            self.fuel_web.update_node_networks(node['id'],
                                               deepcopy(interfaces))
            for offloading in offloading_modes:
                self.fuel_web.update_offloads(
                    node['id'], deepcopy(offloading), offloading['name'])
        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                 for node in ['slave-01', 'slave-02', 'slave-03']]
        for node in nodes:
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                logger.info("Verify Offload types")

                result = check_offload(remote,
                                       iface_alias('eth1'),
                                       'rx-vlan-offload')
                assert_equal(result, "on",
                             "Offload type {0} is {1} on remote host"
                             .format('rx-vlan-offload', result))

                result = check_offload(remote,
                                       iface_alias('eth1'),
                                       'tx-vlan-offload')
                assert_equal(result, "on",
                             "Offload type {0} is {1} on remote host"
                             .format('tx-vlan-offload', result))

                result = check_offload(remote,
                                       iface_alias('eth2'),
                                       'large-receive-offload')
                assert_equal(result, "off",
                             "Offload type {0} is {1} on remote host"
                             .format('large-receive-offload', result))

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vlan")
    def offloading_neutron_vlan(self):
        """Deploy cluster with specific offload modes and neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Verify offloading modes on nodes
            8. Run network verification
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={"net_provider": "neutron", "net_segment_type": settings.NEUTRON_SEGMENT["vlan"]},
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {"slave-01": ["controller"], "slave-02": ["compute"], "slave-03": ["cinder"]}
        )

        iface1 = settings.iface_alias("eth3")
        iface2 = settings.iface_alias("eth2")

        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        self.show_step(4)
        offloadings_1 = []
        offloadings_2 = []
        for node in nodes:
            modes = self.fuel_web.get_offloading_modes(node["id"], [iface1])
            for name in self.offloadings_1:
                if name in modes and name not in offloadings_1:
                    offloadings_1.append(name)
            modes = self.fuel_web.get_offloading_modes(node["id"], [iface2])
            for name in self.offloadings_2:
                if name in modes and name not in offloadings_2:
                    offloadings_2.append(name)

        assert_true(len(offloadings_1) > 0, "No types for disable offloading")
        assert_true(len(offloadings_2) > 0, "No types for enable offloading")

        modes = self.prepare_offloading_modes(iface1, offloadings_1, False)
        modes += self.prepare_offloading_modes(iface2, offloadings_2, True)

        for node in nodes:
            self.fuel_web.update_node_networks(node["id"], interfaces_dict=deepcopy(self.interfaces))
            for offloading in modes:
                self.fuel_web.update_offloads(node["id"], deepcopy(offloading), offloading["name"])

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.check_offloading_modes(nodes, offloadings_1, iface1, "off")
        self.check_offloading_modes(nodes, offloadings_2, iface2, "on")

        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vlan")
Example 28
    def offloading_neutron_vlan(self):
        """Deploy cluster with specific offload modes and neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Run network verification
            8. Verify offloading modes on nodes
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )

        iface1 = settings.iface_alias('eth3')
        iface2 = settings.iface_alias('eth2')

        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        self.show_step(4)
        offloadings_1 = []
        offloadings_2 = []
        for node in nodes:
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface1])
            for name in self.offloadings_1:
                if name in modes and name not in offloadings_1:
                    offloadings_1.append(name)
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface2])
            for name in self.offloadings_2:
                if name in modes and name not in offloadings_2:
                    offloadings_2.append(name)

        assert_true(len(offloadings_1) > 0, "No types for disable offloading")
        assert_true(len(offloadings_2) > 0, "No types for enable offloading")

        modes = self.prepare_offloading_modes(iface1, offloadings_1, False)
        modes += self.prepare_offloading_modes(iface2, offloadings_2, True)

        for node in nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.interfaces))
            for offloading in modes:
                self.fuel_web.update_offloads(
                    node['id'], deepcopy(offloading), offloading['name'])

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node in nodes:
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                for name in offloadings_1:
                    result = check_offload(remote, iface1, name)
                    assert_equal(result, "off",
                                 "Offload type {0} is {1} on {2}".format(
                                         name, result, node['name']))
                for name in offloadings_2:
                    result = check_offload(remote, iface2, name)
                    assert_equal(result, "on",
                                 "Offload type {0} is {1} on {2}".format(
                                         name, result, node['name']))

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vlan")
Example 29
    def offloading_neutron_vlan(self):
        """Deploy cluster with specific offload modes and neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Verify offloading modes on nodes
            8. Run network verification
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )

        iface1 = settings.iface_alias('eth3')
        iface2 = settings.iface_alias('eth2')

        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        self.show_step(4)
        offloadings_1 = {}
        offloadings_2 = {}
        for node in nodes:
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface1])
            for name in self.offloadings_1:
                if name in modes and name not in offloadings_1:
                    offloadings_1[name] = self.offloadings_1[name]
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface2])
            for name in self.offloadings_2:
                if name in modes and name not in offloadings_2:
                    offloadings_2[name] = self.offloadings_2[name]

        assert_true(len(offloadings_1) > 0, "No offloading types to disable")
        assert_true(len(offloadings_2) > 0, "No offloading types to enable")

        offloadings = {
            iface1: offloadings_1,
            iface2: offloadings_2
        }
        for node in nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.interfaces))
            for offloading in offloadings:
                self.fuel_web.update_offloads(
                    node['id'], offloadings[offloading], offloading)

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.check_offloading_modes(nodes, offloadings_1, iface1, 'off')
        self.check_offloading_modes(nodes, offloadings_2, iface2, 'on')

        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vlan")
Esempio n. 30
0
    def offloading_neutron_vlan(self):
        """Deploy cluster with specific offload modes and neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Run network verification
            8. Verify offloading modes on nodes
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )

        iface1 = settings.iface_alias('eth3')
        iface2 = settings.iface_alias('eth2')

        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        self.show_step(4)
        offloadings_1 = []
        offloadings_2 = []
        for node in nodes:
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface1])
            for name in self.offloadings_1:
                if name in modes and name not in offloadings_1:
                    offloadings_1.append(name)
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface2])
            for name in self.offloadings_2:
                if name in modes and name not in offloadings_2:
                    offloadings_2.append(name)

        assert_true(len(offloadings_1) > 0, "No offloading types to disable")
        assert_true(len(offloadings_2) > 0, "No offloading types to enable")

        modes = self.prepare_offloading_modes(iface1, offloadings_1, False)
        modes += self.prepare_offloading_modes(iface2, offloadings_2, True)

        for node in nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.interfaces))
            for offloading in modes:
                self.fuel_web.update_offloads(
                    node['id'], deepcopy(offloading), offloading['name'])

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node in nodes:
            for name in offloadings_1:
                result = check_offload(node['ip'], iface1, name)
                assert_equal(result, "off",
                             "Offload type {0} is {1} on {2}".format(
                                     name, result, node['name']))
            for name in offloadings_2:
                result = check_offload(node['ip'], iface2, name)
                assert_equal(result, "on",
                             "Offload type {0} is {1} on {2}".format(
                                     name, result, node['name']))

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vlan")
Esempio n. 31
0
    def ha_ceilometer_untag_network(self):
        """Deployment with 3 controllers, NeutronVLAN and untag network,
           with Ceilometer

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose Ceilometer
            4. Add 3 controllers
            5. Add 1 compute
            6. Add 3 mongo+cinder
            7. Move Storage network to eth1 and specify vlan start
            8. Move Management network to eth2 (it's untagged by default)
            9. Verify networks
            10. Deploy the environment
            11. Verify networks
            12. Run OSTF tests

        Duration 180m
        Snapshot ha_ceilometer_untag_network
        """
        self.env.revert_snapshot('ready_with_9_slaves')
        data = {
            'ceilometer': True,
            'tenant': 'mongomultirole',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        self.show_step(1, initialize=True)
        self.show_step(2)
        self.show_step(3)
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  settings=data)

        self.show_step(4)
        self.show_step(5)
        self.show_step(6)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['mongo', 'cinder'],
                'slave-06': ['mongo', 'cinder'],
                'slave-07': ['mongo', 'cinder']
            })
        self.show_step(7)
        self.show_step(8)
        vlan_turn_on = {'vlan_start': 102}
        interfaces = {
            iface_alias('eth0'): ['private'],
            iface_alias('eth1'): ['storage', 'public'],
            iface_alias('eth2'): ['management'],
            iface_alias('eth3'): [],
            iface_alias('eth4'): []
        }

        nets = self.fuel_web.client.get_networks(cluster_id)['networks']
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(node['id'], interfaces)

        for net in nets:
            if net['name'] == 'storage':
                net.update(vlan_turn_on)
        self.fuel_web.client.update_network(cluster_id, networks=nets)

        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(10)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        self.fuel_web.run_ostf(cluster_id)
        self.env.make_snapshot('ha_ceilometer_untag_network')
Esempio n. 32
0
    def offloading_neutron_vlan(self):
        """Deploy cluster with specific offload modes and neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Run network verification
            8. Verify offloading modes on nodes
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": 'vlan',
            })

        interfaces = {
            iface_alias('eth1'): ['public'],
            iface_alias('eth2'): ['private'],
            iface_alias('eth3'): ['management'],
            iface_alias('eth4'): ['storage'],
        }

        offloading_modes = [{
            'name': iface_alias('eth1'),
            'offloading_modes': [
                {'state': 'true', 'name': 'rx-vlan-offload', 'sub': []},
                {'state': 'true', 'name': 'tx-vlan-offload', 'sub': []}
            ]
        }, {
            'name': iface_alias('eth2'),
            'offloading_modes': [
                {'state': 'false', 'name': 'large-receive-offload', 'sub': []}
            ]
        }]

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })

        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        self.show_step(4)
        for node in slave_nodes:
            self.fuel_web.update_node_networks(node['id'],
                                               deepcopy(interfaces))
            for offloading in offloading_modes:
                self.fuel_web.update_offloads(node['id'], deepcopy(offloading),
                                              offloading['name'])
        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-01', 'slave-02', 'slave-03']
        ]
        for node in nodes:
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                logger.info("Verify Offload types")

                result = check_offload(remote, iface_alias('eth1'),
                                       'rx-vlan-offload')
                assert_equal(
                    result, "on",
                    "Offload type {0} is {1} on remote host".format(
                        'rx-vlan-offload', result))

                result = check_offload(remote, iface_alias('eth1'),
                                       'tx-vlan-offload')
                assert_equal(
                    result, "on",
                    "Offload type {0} is {1} on remote host".format(
                        'tx-vlan-offload', result))

                result = check_offload(remote, iface_alias('eth2'),
                                       'large-receive-offload')
                assert_equal(
                    result, "off",
                    "Offload type {0} is {1} on remote host".format(
                        'large-receive-offload', result))

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vlan")
Esempio n. 33
0
class TestOffloading(TestBasic):

    interfaces = {
        settings.iface_alias('eth1'): ['public'],
        settings.iface_alias('eth2'): ['management'],
        settings.iface_alias('eth3'): ['private'],
        settings.iface_alias('eth4'): ['storage'],
    }

    offloadings_1 = ['generic-receive-offload',
                     'generic-segmentation-offload',
                     'tcp-segmentation-offload',
                     'large-receive-offload']

    offloadings_2 = ['rx-all',
                     'rx-vlan-offload',
                     'tx-vlan-offload']

    def prepare_offloading_modes(self, interface, types, state):
        return [{'name': interface,
                 'offloading_modes': [{'name': name, 'state': state,
                                       'sub': []} for name in types]}]
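    # For example, prepare_offloading_modes('eth3', ['rx-all'], False) returns
    # [{'name': 'eth3',
    #   'offloading_modes': [{'name': 'rx-all', 'state': False, 'sub': []}]}],
    # and each entry of that list is what gets passed to update_offloads()
    # in the tests below.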

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["offloading_neutron_vlan", "offloading"])
    @log_snapshot_after_test
    def offloading_neutron_vlan(self):
        """Deploy cluster with specific offload modes and neutron VLAN

        Scenario:
            1. Create cluster with neutron VLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Run network verification
            8. Verify offloading modes on nodes
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )

        iface1 = settings.iface_alias('eth3')
        iface2 = settings.iface_alias('eth2')

        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        self.show_step(4)
        offloadings_1 = []
        offloadings_2 = []
        for node in nodes:
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface1])
            for name in self.offloadings_1:
                if name in modes and name not in offloadings_1:
                    offloadings_1.append(name)
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface2])
            for name in self.offloadings_2:
                if name in modes and name not in offloadings_2:
                    offloadings_2.append(name)

        assert_true(len(offloadings_1) > 0, "No offloading types to disable")
        assert_true(len(offloadings_2) > 0, "No offloading types to enable")

        modes = self.prepare_offloading_modes(iface1, offloadings_1, False)
        modes += self.prepare_offloading_modes(iface2, offloadings_2, True)

        for node in nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.interfaces))
            for offloading in modes:
                self.fuel_web.update_offloads(
                    node['id'], deepcopy(offloading), offloading['name'])

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node in nodes:
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                for name in offloadings_1:
                    result = check_offload(remote, iface1, name)
                    assert_equal(result, "off",
                                 "Offload type {0} is {1} on {2}".format(
                                         name, result, node['name']))
                for name in offloadings_2:
                    result = check_offload(remote, iface2, name)
                    assert_equal(result, "on",
                                 "Offload type {0} is {1} on {2}".format(
                                         name, result, node['name']))

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vlan")

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["offloading_neutron_vxlan", "offloading"])
    @log_snapshot_after_test
    def offloading_neutron_vxlan(self):
        """Deploy cluster with specific offload modes and neutron VXLAN

        Scenario:
            1. Create cluster with neutron VXLAN
            2. Add 1 node with controller role
            3. Add 1 node with compute role and 1 node with cinder role
            4. Setup offloading types
            5. Run network verification
            6. Deploy the cluster
            7. Run network verification
            8. Verify offloading modes on nodes
            9. Run OSTF

        Duration 30m
        Snapshot offloading_neutron_vxlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )

        iface1 = settings.iface_alias('eth3')
        iface2 = settings.iface_alias('eth2')

        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        self.show_step(4)
        offloadings_1 = []
        offloadings_2 = []
        for node in nodes:
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface1])
            for name in self.offloadings_1:
                if name in modes and name not in offloadings_1:
                    offloadings_1.append(name)
            modes = self.fuel_web.get_offloading_modes(node['id'], [iface2])
            for name in self.offloadings_2:
                if name in modes and name not in offloadings_2:
                    offloadings_2.append(name)

        assert_true(len(offloadings_1) > 0, "No offloading types to disable")
        assert_true(len(offloadings_2) > 0, "No offloading types to enable")

        modes = self.prepare_offloading_modes(iface1, offloadings_1, False)
        modes += self.prepare_offloading_modes(iface2, offloadings_2, True)

        for node in nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.interfaces))
            for offloading in modes:
                self.fuel_web.update_offloads(
                    node['id'], deepcopy(offloading), offloading['name'])

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node in nodes:
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                for name in offloadings_1:
                    result = check_offload(remote, iface1, name)
                    assert_equal(result, "off",
                                 "Offload type {0} is {1} on {2}".format(
                                         name, result, node['name']))
                for name in offloadings_2:
                    result = check_offload(remote, iface2, name)
                    assert_equal(result, "on",
                                 "Offload type {0} is {1} on {2}".format(
                                         name, result, node['name']))

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("offloading_neutron_vxlan")
Esempio n. 34
0
class TestJumboFrames(base_test_case.TestBasic):
    def __init__(self):
        self.os_conn = None
        super(TestJumboFrames, self).__init__()

    interfaces = {
        iface_alias('eth0'): ['fuelweb_admin'],
        iface_alias('eth1'): ['public'],
        iface_alias('eth2'): ['management'],
        iface_alias('eth3'): ['private'],
        iface_alias('eth4'): ['storage'],
    }

    interfaces_update = [{
        'name': iface_alias('eth3'),
        'interface_properties': {
            'mtu': 9000,
            'disable_offloading': False
        },
    }]

    def check_node_iface_mtu(self, node, iface, mtu):
        """Check mtu on environment node network interface."""

        return "mtu {0}".format(mtu) in self.get_node_iface(node, iface)

    @staticmethod
    def get_node_iface(node, iface):
        """Get environment node network interface."""

        command = "sudo ip link show {0}".format(iface)
        return ''.join(node.execute(command)['stdout'])

    @staticmethod
    def set_host_iface_mtu(iface, mtu):
        """Set devops/fuel-qa host network interface mtu."""

        command = "sudo ip link set {0} mtu {1}".format(iface, mtu).split()
        return subprocess.call(command, stderr=subprocess.STDOUT)

    @staticmethod
    def get_host_iface(iface):
        """Get devops/fuel-qa host network interface."""

        command = "sudo ip link show {0}".format(iface).split()
        return subprocess.check_output(command, stderr=subprocess.STDOUT)

    @staticmethod
    def get_host_bridge_ifaces(bridge_name):
        """Get list of devops/fuel-qa host network bridge interfaces."""

        command = "sudo brctl show {0}".format(bridge_name).split()
        ifaces = subprocess.check_output(command, stderr=subprocess.STDOUT)

        # `brctl show <bridge>` prints a header row, then a row for the bridge
        # whose last column holds the first attached interface; any further
        # interfaces follow on their own lines. Drop the header, pull that
        # first interface off the bridge row and merge it with the rest.
        ifaces = ifaces.splitlines()[1:]
        bridge_iface = ifaces[0].split()[-1]
        ifaces = [iface.strip() for iface in ifaces[1:]]
        ifaces.append(bridge_iface)

        return ifaces

    def boot_instance_on_node(self,
                              hypervisor_name,
                              label,
                              boot_timeout=300,
                              need_floating_ip=True):
        instance = self.os_conn.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=label)
        logger.info("New instance {0} created on {1}".format(
            instance.id, hypervisor_name))
        ip = self.os_conn.get_nova_instance_ip(instance,
                                               net_name=label,
                                               addrtype='fixed')
        logger.info("Instance {0} has IP {1}".format(instance.id, ip))

        if not need_floating_ip:
            return self.os_conn.nova.servers.get(instance.id)

        ip = self.os_conn.assign_floating_ip(instance)
        logger.info("Floating address {0} associated with instance {1}".format(
            ip.ip, instance.id))

        logger.info("Wait for ping from instance {}".format(instance.id))
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(ip.ip, 22),
            timeout=boot_timeout,
            timeout_msg=("Instance {0} is unreachable for {1} seconds".format(
                instance.id, boot_timeout)))

        return self.os_conn.nova.servers.get(instance.id)

    def ping_instance_from_instance(self,
                                    source_instance,
                                    destination_instance,
                                    net_from,
                                    net_to,
                                    size,
                                    count=1):
        creds = ("cirros", "cubswin:)")
        destination_ip = self.os_conn.get_nova_instance_ip(
            destination_instance, net_name=net_to, addrtype='fixed')
        source_ip = self.os_conn.get_nova_instance_ip(source_instance,
                                                      net_name=net_from,
                                                      addrtype='floating')

        with self.fuel_web.get_ssh_for_node("slave-01") as ssh:
            command = "ping -s {0} {1}".format(size, destination_ip)
            if count:
                command = "{0} -c {1}".format(command, count)
            logger.info(
                "Try to ping private address {0} from {1} with {2} {3} bytes "
                "packet(s): {4}".format(destination_ip, source_ip, count, size,
                                        command))
            ping = self.os_conn.execute_through_host(ssh, source_ip, command,
                                                     creds)
            logger.info("Ping result: \n"
                        "{0}\n"
                        "{1}\n"
                        "exit_code={2}".format(ping['stdout'], ping['stderr'],
                                               ping['exit_code']))

            return 0 == ping['exit_code']

    def check_mtu_size_between_instances(self, mtu_offset, diff_net=False):
        """Check private network mtu size

        Scenario:
            1. Boot two instances on different compute hosts
            2. Ping one from another with a small packet to check connectivity
            3. Ping one from another with packets sized just under the 1500
               and 9000 byte MTUs (pings should succeed)
            4. Ping one from another with a packet sized just above the 9000
               byte MTU (ping should fail)
            5. Delete instances

        """
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        net_destination = net_name
        need_floating_ip = True
        hypervisors = self.os_conn.get_hypervisors()

        if diff_net:
            net_destination = 'private1'
            need_floating_ip = False
            net1 = self.os_conn.create_network(net_destination)['network']
            subnet1 = self.os_conn.create_subnet('private1_subnet', net1['id'],
                                                 '192.168.200.0/24')
            router = self.os_conn.get_router_by_name('router04')
            self.os_conn.add_router_interface(router['id'], subnet1['id'])

        destination_instance = self.boot_instance_on_node(
            hypervisors[1].hypervisor_hostname,
            label=net_destination,
            need_floating_ip=need_floating_ip)
        source_instance = self.boot_instance_on_node(
            hypervisors[0].hypervisor_hostname, label=net_name)

        logger.info(
            "Wait for ping from instance {}".format(destination_instance.id))
        devops_helpers.wait(
            lambda: self.ping_instance_from_instance(source_instance,
                                                     destination_instance,
                                                     net_name,
                                                     net_destination,
                                                     size=15,
                                                     count=3),
            interval=10,
            timeout=600,
            timeout_msg=("Instance {0} is unreachable for 600 seconds".format(
                destination_instance.id)))

        for mtu in [1500, 9000, 9001]:
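            # ICMP payload size: MTU minus the 20-byte IP and 8-byte ICMP
            # headers; mtu_offset additionally reserves room for tunnel
            # encapsulation (e.g. 50 bytes for VXLAN in the *_vxlan tests).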
            size = mtu - 28 - mtu_offset
            if mtu <= 9000:
                asserts.assert_true(
                    self.ping_instance_from_instance(source_instance,
                                                     destination_instance,
                                                     net_name,
                                                     net_destination,
                                                     size=size,
                                                     count=3),
                    "Ping response was not received for "
                    "{} bytes package".format(mtu))
            else:
                asserts.assert_false(
                    self.ping_instance_from_instance(source_instance,
                                                     destination_instance,
                                                     net_name,
                                                     net_destination,
                                                     size=size,
                                                     count=3),
                    "Ping response was not received for "
                    "{} bytes package".format(mtu))

        for instance in [source_instance, destination_instance]:
            self.os_conn.delete_instance(instance)
            self.os_conn.verify_srv_deleted(instance)

    @test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5],
          groups=["prepare_5_slaves_with_jumbo_frames"])
    def prepare_5_slaves_with_jumbo_frames(self):
        """Setup jumbo frames on private bridge on host

        Scenario:
            1. Find bridge with name "private"
            2. Set mtu 9000 for all bridge interfaces
            3. Make snapshot ready_with_5_slaves_jumbo_frames

        Duration 5m
        Snapshot ready_with_5_slaves_jumbo_frames

        """
        self.check_run("ready_with_5_slaves_jumbo_frames")
        self.env.revert_snapshot("ready_with_5_slaves")

        devops_env = self.env.d_env
        private_bridge = devops_env.get_network(name='private').bridge_name()
        logger.info(
            "Search for {0} interfaces for update".format(private_bridge))

        bridge_interfaces = self.get_host_bridge_ifaces(private_bridge)
        logger.info("Found {0} interfaces for update: {1}".format(
            len(bridge_interfaces), bridge_interfaces))

        for iface in bridge_interfaces:
            self.set_host_iface_mtu(iface, 9000)
            logger.info("MTU of {0} was changed to 9000".format(iface))
            logger.debug("New {0} interface properties:\n{1}".format(
                iface, self.get_host_iface(iface)))

        self.env.make_snapshot("ready_with_5_slaves_jumbo_frames",
                               is_make=True)

    @test(depends_on=[prepare_5_slaves_with_jumbo_frames],
          groups=["jumbo_frames_neutron_vlan"])
    @decorators.log_snapshot_after_test
    def jumbo_frames_neutron_vlan(self):
        """Verify jumbo frames between instances on HA Neutron VLAN

        Scenario:
            1. Revert snapshot ready_with_5_slaves_jumbo_frames
            2. Create cluster with neutron VLAN
            3. Add 3 nodes with controller role
            4. Add 2 nodes with compute role
            5. Set mtu=9000 on private interface
            6. Deploy the cluster
            7. Run network verification
            8. Check MTU on private interface
            9. Run MTU size check
            10. Run OSTF

        Duration 120m
        Snapshot ready_jumbo_frames_neutron_vlan

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            })

        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })

        self.show_step(5)
        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in slave_nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                self.interfaces,
                override_ifaces_params=self.interfaces_update)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node_name in [
                'slave-01', 'slave-02', 'slave-03', 'slave-04', 'slave-05'
        ]:
            node = self.fuel_web.get_nailgun_node_by_name(node_name)
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                for iface in self.interfaces_update:
                    asserts.assert_true(
                        self.check_node_iface_mtu(remote, iface['name'], 9000),
                        "MTU on {0} is not 9000. "
                        "Actual value: {1}".format(
                            remote.host,
                            self.get_node_iface(remote, iface['name'])))

        self.show_step(9)
        self.check_mtu_size_between_instances(mtu_offset=0)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("ready_jumbo_frames_neutron_vlan")

    @test(depends_on=[prepare_5_slaves_with_jumbo_frames],
          groups=["jumbo_frames_neutron_vxlan"])
    @decorators.log_snapshot_after_test
    def jumbo_frames_neutron_vxlan(self):
        """Verify jumbo frames between instances on HA and Neutron VXLAN

        Scenario:
            1. Revert snapshot ready_with_5_slaves_jumbo_frames
            2. Create cluster with neutron VXLAN
            3. Add 3 nodes with controller role
            4. Add 2 nodes with compute role
            5. Set mtu=9000 on private interface
            6. Deploy the cluster
            7. Run network verification
            8. Check MTU on private interface
            9. Run MTU size check
            10. Run OSTF

        Duration 120m
        Snapshot ready_jumbo_frames_neutron_vxlan

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
            })

        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })

        self.show_step(5)
        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in slave_nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                self.interfaces,
                override_ifaces_params=self.interfaces_update)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node_name in [
                'slave-01', 'slave-02', 'slave-03', 'slave-04', 'slave-05'
        ]:
            node = self.fuel_web.get_nailgun_node_by_name(node_name)
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                for iface in self.interfaces_update:
                    asserts.assert_true(
                        self.check_node_iface_mtu(remote, iface['name'], 9000),
                        "MTU on {0} is not 9000. "
                        "Actual value: {1}".format(
                            remote.host,
                            self.get_node_iface(remote, iface['name'])))

        self.show_step(9)
        self.check_mtu_size_between_instances(mtu_offset=50)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("ready_jumbo_frames_neutron_vxlan")

    @test(depends_on=[prepare_5_slaves_with_jumbo_frames],
          groups=["jumbo_frames_neutron_diff_net_vlan"])
    @decorators.log_snapshot_after_test
    def jumbo_frames_neutron_diff_net_vlan(self):
        """Verify jumbo frames between instances in different networks on HA
        and Neutron VLAN

        Scenario:
            1. Revert snapshot ready_with_5_slaves_jumbo_frames
            2. Create cluster with neutron VLAN
            3. Add 3 nodes with controller role
            4. Add 2 nodes with compute role
            5. Set mtu=9000 on private interface
            6. Deploy the cluster
            7. Run network verification
            8. Check MTU on private interface
            9. Run MTU size check
            10. Run OSTF

        Duration 120m
        Snapshot jumbo_frames_neutron_diff_net_vlan

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            })

        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })

        self.show_step(5)
        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in slave_nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.interfaces),
                override_ifaces_params=self.interfaces_update)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node_name in [
                'slave-01', 'slave-02', 'slave-03', 'slave-04', 'slave-05'
        ]:
            node = self.fuel_web.get_nailgun_node_by_name(node_name)
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                for iface in self.interfaces_update:
                    asserts.assert_true(
                        self.check_node_iface_mtu(remote, iface['name'], 9000),
                        "MTU on {0} is not 9000. "
                        "Actual value: {1}".format(
                            remote.host,
                            self.get_node_iface(remote, iface['name'])))

        self.show_step(9)
        self.check_mtu_size_between_instances(mtu_offset=0, diff_net=True)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("jumbo_frames_neutron_diff_net_vlan")

    @test(depends_on=[prepare_5_slaves_with_jumbo_frames],
          groups=["jumbo_frames_neutron_diff_net_vxlan"])
    @decorators.log_snapshot_after_test
    def jumbo_frames_neutron_diff_net_vxlan(self):
        """Verify jumbo frames between instances in different networks on HA
        and Neutron VXLAN

        Scenario:
            1. Revert snapshot ready_with_5_slaves_jumbo_frames
            2. Create cluster with neutron VXLAN
            3. Add 3 nodes with controller role
            4. Add 2 nodes with compute role
            5. Set mtu=9000 on private interface
            6. Deploy the cluster
            7. Run network verification
            8. Check MTU on private interface
            9. Run MTU size check
            10. Run OSTF

        Duration 120m
        Snapshot jumbo_frames_neutron_diff_net_vxlan

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves_jumbo_frames")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
            })

        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })

        self.show_step(5)
        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in slave_nodes:
            self.fuel_web.update_node_networks(
                node['id'],
                interfaces_dict=deepcopy(self.interfaces),
                override_ifaces_params=self.interfaces_update)

        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        for node_name in [
                'slave-01', 'slave-02', 'slave-03', 'slave-04', 'slave-05'
        ]:
            node = self.fuel_web.get_nailgun_node_by_name(node_name)
            with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:
                for iface in self.interfaces_update:
                    asserts.assert_true(
                        self.check_node_iface_mtu(remote, iface['name'], 9000),
                        "MTU on {0} is not 9000. "
                        "Actual value: {1}".format(
                            remote.host,
                            self.get_node_iface(remote, iface['name'])))

        self.show_step(9)
        self.check_mtu_size_between_instances(mtu_offset=50, diff_net=True)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("jumbo_frames_neutron_diff_net_vxlan")
Esempio n. 35
0
METADATA_IP = os.environ.get('METADATA_IP', '169.254.169.254')
VM_USER = '******'
VM_PASS = '******'
AZ_VCENTER1 = 'vcenter'
AZ_VCENTER2 = 'vcenter2'
FLAVOR_NAME = 'm1.micro128'

PLUGIN_NAME = os.environ.get('PLUGIN_NAME', 'nsx-t')
NSXT_PLUGIN_PATH = os.environ.get('NSXT_PLUGIN_PATH')
NSXT_PLUGIN_VERSION = os.environ.get('NSXT_PLUGIN_VERSION', '1.0.0')
NSXT_MANAGERS_IP = os.environ.get('NSXT_MANAGERS_IP')
NSXT_USER = os.environ.get('NSXT_USER')


assigned_networks = {
    iface_alias('eth0'): ['fuelweb_admin', 'private'],
    iface_alias('eth1'): ['public'],
    iface_alias('eth2'): ['management'],
    iface_alias('eth4'): ['storage']
}

cluster_settings = {
    'net_provider': 'neutron',
    'assign_to_all_nodes': True,
    'net_segment_type': NEUTRON_SEGMENT_TYPE
}

plugin_configuration = {
    'insecure/value': get_var_as_bool(os.environ.get('NSXT_INSECURE'), True),
    'nsx_api_managers/value': NSXT_MANAGERS_IP,
    'nsx_api_user/value': NSXT_USER,
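
`get_var_as_bool`, used above to turn the `NSXT_INSECURE` environment variable into a boolean, is another small settings helper that is not shown here. A minimal sketch, assuming it merely recognises the usual truthy strings and falls back to the supplied default when the value is empty or unset (the real helper may differ):

def get_var_as_bool(value, default):
    """Interpret a string (typically an environment variable) as a boolean.

    Sketch under the stated assumption; the framework's own helper may
    accept other spellings or take the variable name instead of its value.
    """
    if value is None or value == '':
        return default
    return str(value).strip().lower() in ('true', 'yes', 'on', '1')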
Esempio n. 36
0
    def ha_ceilometer_untag_network(self):
        """Deployment with 3 controllers, NeutronVLAN and untag network,
           with Ceilometer

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose Ceilometer
            4. Add 3 controllers
            5. Add 1 compute
            6. Add 3 mongo+cinder
            7. Move Storage network to eth1 and specify vlan start
            8. Move Management network to eth2 and untag it
            9. Verify networks
            10. Deploy the environment
            11. Verify networks
            12. Run OSTF tests

        Duration 180m
        Snapshot ha_ceilometer_untag_network
        """
        self.env.revert_snapshot('ready_with_9_slaves')
        data = {
            'ceilometer': True,
            'tenant': 'mongomultirole',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        self.show_step(1, initialize=True)
        self.show_step(2)
        self.show_step(3)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings=data
        )

        self.show_step(4)
        self.show_step(5)
        self.show_step(6)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['mongo', 'cinder'],
                'slave-06': ['mongo', 'cinder'],
                'slave-07': ['mongo', 'cinder']
            }
        )
        self.show_step(7)
        self.show_step(8)
        vlan_turn_on = {'vlan_start': 102}
        interfaces = {
            iface_alias('eth0'): ['private'],
            iface_alias('eth1'): ['storage', 'public'],
            iface_alias('eth2'): ['management'],
            iface_alias('eth3'): [],
            iface_alias('eth4'): []
        }

        nets = self.fuel_web.client.get_networks(cluster_id)['networks']
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(node['id'], interfaces)

        for net in nets:
            if net['name'] == 'storage':
                net.update(vlan_turn_on)

        self.fuel_web.client.update_network(cluster_id, networks=nets)

        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(10)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        self.fuel_web.run_ostf(cluster_id)
        self.env.make_snapshot('ha_ceilometer_untag_network')