def deploy_neutron_tun(self):
    """Deploy cluster in ha mode with 1 controller and Neutron VXLAN

    Scenario:
        1. Create cluster
        2. Add 1 node with controller role
        3. Add 2 nodes with compute role
        4. Run network verification
        5. Deploy the cluster
        6. Run network verification
        7. Run OSTF

    Duration 35m
    Snapshot deploy_neutron_tun

    """
    self.env.revert_snapshot("ready_with_3_slaves")

    cluster_settings = {
        "net_provider": 'neutron',
        "net_segment_type": NEUTRON_SEGMENT['tun'],
        'tenant': 'simpleTun',
        'user': '******',
        'password': '******'
    }
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings=cluster_settings)
    node_roles = {
        'slave-01': ['controller'],
        'slave-02': ['compute', 'cinder'],
        'slave-03': ['compute', 'cinder']
    }
    self.fuel_web.update_nodes(cluster_id, node_roles)
    self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/26',
                                          '192.168.196.1')
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.deploy_cluster_wait(cluster_id)
    os_conn = os_actions.OpenStackActions(
        self.fuel_web.get_public_vip(cluster_id),
        cluster_settings['user'],
        cluster_settings['password'],
        cluster_settings['tenant'])

    # The deployed cluster must report the Neutron provider it was
    # created with.
    cluster = self.fuel_web.client.get_cluster(cluster_id)
    assert_equal(str(cluster['net_provider']), 'neutron')
    self.fuel_web.check_fixed_network_cidr(cluster_id, os_conn)

    checkers.check_client_smoke(self.ssh_manager.admin_ip)

    self.fuel_web.verify_network(cluster_id)

    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("deploy_neutron_tun")
# --- Example 2 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def ha_one_controller_neutron_add_compute(self):
        """Add compute node to cluster in ha mode

        Scenario:
            1. Create cluster in HA mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Validate cluster was set up correctly, there are no dead
               services, there are no errors in logs
            6. Add 1 node with role compute
            7. Deploy changes
            8. Validate cluster was set up correctly, there are no dead
               services, there are no errors in logs
            9. Verify services list on compute nodes
            10. Run OSTF

        Duration 40m
        Snapshot: ha_one_controller_neutron_add_compute
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        creds = {
            'tenant': 'neutronAddCompute',
            'user': '******',
            'password': '******',
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=creds)
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], 'slave-02': ['compute']})
        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            creds['user'], creds['password'], creds['tenant'])
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)

        # Register the third slave as an extra compute and redeploy.
        self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute']}, True, False)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=6)

        cluster_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        assert_equal(3, len(cluster_nodes))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ha_one_controller_neutron_add_compute")
# --- Example 3 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def ha_one_controller_backup_restore(self):
        """Backup/restore master node with one controller in cluster

        Scenario:
            1. Revert snapshot "deploy_ha_one_controller_backup_restore"
            2. Backup master
            3. Check backup
            4. Run OSTF
            5. Add 1 node with compute role
            6. Restore master
            7. Check restore
            8. Run OSTF

        Duration 35m

        """
        self.env.revert_snapshot("deploy_ha_one_controller_backup_restore")

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Tenant/user/password all equal 'neutronOneController' — presumably
        # the credentials used when the reverted snapshot was built (TODO
        # confirm against the snapshot-creating test).
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id), 'neutronOneController',
            'neutronOneController', 'neutronOneController')
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)

        with self.env.d_env.get_admin_remote() as remote:
            # Execute master node backup
            self.fuel_web.backup_master(remote)
            # Check created backup
            checkers.backup_check(remote)

        # Register a pending compute node AFTER the backup was taken; the
        # restore below is expected to discard this pending change.
        self.fuel_web.update_nodes(cluster_id, {'slave-03': ['compute']}, True,
                                   False)

        # With the pending node the cluster lists 3 nodes.
        assert_equal(3,
                     len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

        with self.env.d_env.get_admin_remote() as remote:
            self.fuel_web.restore_master(remote)
            checkers.restore_check_sum(remote)
            self.fuel_web.restore_check_nailgun_api(remote)
            checkers.iptables_check(remote)

        # After restore the pending node must be gone again (back to 2).
        assert_equal(2,
                     len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

        # Re-add the compute node and deploy it for real this time.
        self.fuel_web.update_nodes(cluster_id, {'slave-03': ['compute']}, True,
                                   False)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ha_one_controller_backup_restore")
    def reconfigure_ml2_vlan_range(self):
        """Reconfigure neutron ml2 VLAN range

        Scenario:
            1. Revert snapshot "basic_env_for_reconfiguration"
            2. Upload a new openstack configuration
            3. Get uptime of process "neutron-server" on each controller
            4. Apply a new VLAN range(minimal range) to all nodes
            5. Wait for configuration applying
            6. Check that service "neutron-server" was restarted
            7. Verify ml2 plugin settings
            8. Create new private network
            9. Try to create one more, verify that it is impossible

        Snapshot: reconfigure_ml2_vlan_range

        """
        self.check_run('reconfigure_ml2_vlan_range')
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("basic_env_for_reconfiguration")

        cluster_id = self.fuel_web.get_last_created_cluster()
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        self.show_step(2)
        config = utils.get_config_template('neutron')
        structured_config = get_structured_config_dict(config)
        self.fuel_web.client.upload_configuration(config, cluster_id)

        self.show_step(3)
        # Sample uptimes BEFORE applying the configuration so a restart of
        # neutron-server can be detected afterwards.
        service_name = 'neutron-server'
        uptimes = self.get_service_uptime(controllers, service_name)

        self.show_step(4)
        task = self.fuel_web.client.apply_configuration(cluster_id)

        self.show_step(5)
        self.fuel_web.assert_task_success(task, timeout=300, interval=5)

        self.show_step(6)
        # Compares current uptimes against the pre-apply samples.
        self.check_service_was_restarted(controllers, uptimes, service_name)

        self.show_step(7)
        self.check_config_on_remote(controllers, structured_config)

        self.show_step(8)
        self.show_step(9)
        # check_ml2_vlan_range covers both steps 8 and 9: it creates one
        # private network and verifies a second one cannot be created.
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.check_ml2_vlan_range(os_conn)

        self.env.make_snapshot("reconfigure_ml2_vlan_range", is_make=True)
    def cinder_partition_preservation(self):
        """Verify partition preservation of Cinder data.

        Scenario:
            1. Revert the snapshot
            2. Create an OS volume
            3. Mark 'cinder' partition to be preserved on the node that hosts
               the created volume
            4. Reinstall the compute node that hosts the created volume
            5. Run network verification
            6. Run OSTF
            7. Verify that the volume is present and has 'available' status
               after the node reinstallation

        Duration: 105m
        """
        self.env.revert_snapshot("node_reinstallation_env")

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Create a volume and resolve the nailgun node that hosts it.
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        volume = os_conn.create_volume()
        hosting = getattr(volume, 'os-vol-host-attr:host')
        cmp_nailgun = self.fuel_web.get_nailgun_node_by_fqdn(
            hosting.rsplit('#')[0])

        # Keep the 'cinder' partition across the reinstallation.
        self._preserve_partition(cmp_nailgun['id'], "cinder")

        node_id = str(cmp_nailgun['id'])
        NodeReinstallationEnv._reinstall_nodes(
            self.fuel_web, cluster_id, [node_id])

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        # The volume must have survived the node reinstallation.
        try:
            volume = os_conn.cinder.volumes.get(volume.id)
        except NotFound:
            raise AssertionError(
                "{0} volume is not available after its {1} hosting node "
                "reinstallation".format(volume.id, cmp_nailgun['fqdn']))

        expected_status = "available"
        assert_equal(
            expected_status,
            volume.status,
            "{0} volume status is {1} after its {2} hosting node "
            "reinstallation. Expected status is {3}.".format(
                volume.id, volume.status, cmp_nailgun['fqdn'], expected_status)
        )
# --- Example 6 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def deploy_ha_flat(self):
        """Deploy cluster in HA mode with flat nova-network

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller roles
            3. Add 2 nodes with compute roles
            4. Deploy the cluster
            5. Validate cluster was set up correctly, there are no dead
            services, there are no errors in logs
            6. Run verify networks
            7. Run OSTF
            8. Make snapshot

        Snapshot deploy_ha_flat

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        auth = {
            'tenant': 'novaHaFlat',
            'user': '******',
            'password': '******'
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings=auth)
        node_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute']
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            auth['user'], auth['password'], auth['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=16, networks_count=1, timeout=300)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.security.verify_firewall(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("deploy_ha_flat")
# --- Example 7 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def backup_restore_ha_flat(self):
        """Backup/restore master node with cluster in ha mode

        Scenario:
            1. Revert snapshot "deploy_ha_flat"
            2. Backup master
            3. Check backup
            4. Run OSTF
            5. Add 1 node with compute role
            6. Restore master
            7. Check restore
            8. Run OSTF

        Duration 50m

        """
        self.env.revert_snapshot("deploy_ha_flat")

        cluster_id = self.fuel_web.get_last_created_cluster()
        # Tenant/user/password match the 'novaHaFlat' credentials used by
        # the deploy_ha_flat test that built the reverted snapshot.
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id), 'novaHaFlat',
            'novaHaFlat', 'novaHaFlat')
        self.fuel_web.assert_cluster_ready(os_conn,
                                           smiles_count=16,
                                           networks_count=1,
                                           timeout=300)
        self.fuel_web.backup_master(self.env.get_admin_remote())
        checkers.backup_check(self.env.get_admin_remote())
        # Register a pending 6th compute AFTER the backup was taken; the
        # restore below is expected to discard this pending change.
        self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(cluster_id, {'slave-06': ['compute']}, True,
                                   False)

        # With the pending node the cluster lists 6 nodes.
        assert_equal(6,
                     len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

        self.fuel_web.restore_master(self.env.get_admin_remote())
        checkers.restore_check_sum(self.env.get_admin_remote())
        self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
        checkers.iptables_check(self.env.get_admin_remote())

        # After restore the pending node must be gone again (back to 5).
        assert_equal(5,
                     len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

        # Re-add the compute node and deploy it for real this time.
        self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(cluster_id, {'slave-06': ['compute']}, True,
                                   False)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("backup_restore_ha_flat")
# --- Example 8 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def deploy_simple_vlan(self):
        """Deploy cluster in simple mode with nova-network VLAN Manager

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Set up cluster to use Network VLAN manager with 8 networks
            5. Deploy the cluster
            6. Validate cluster was set up correctly, there are no dead
            services, there are no errors in logs
            7. Run network verification
            8. Run OSTF

        Snapshot: deploy_simple_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        credentials = {
            'tenant': 'novaSimpleVlan',
            'user': '******',
            'password': '******'
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
            settings=credentials)
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], 'slave-02': ['compute']})

        # Switch fixed networks to the VLAN manager: 8 networks,
        # 32 addresses each.
        self.fuel_web.update_vlan_network_fixed(
            cluster_id, amount=8, network_size=32)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        controller_ip = self.fuel_web.get_nailgun_node_by_name(
            'slave-01')['ip']
        os_conn = os_actions.OpenStackActions(
            controller_ip, credentials['user'], credentials['password'],
            credentials['tenant'])

        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=8, timeout=300)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_simple_vlan")
# --- Example 9 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def deploy_simple_flat(self):
        """Deploy cluster in simple mode with flat nova-network

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Validate cluster was set up correctly, there are no dead
            services, there are no errors in logs
            6. Verify networks
            7. Verify network configuration on controller
            8. Run OSTF

        Snapshot: deploy_simple_flat

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        credentials = {
            'tenant': 'novaSimpleFlat',
            'user': '******',
            'password': '******'
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
            settings=credentials)
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], 'slave-02': ['compute']})
        self.fuel_web.update_internal_network(cluster_id, '10.1.0.0/24')
        self.fuel_web.deploy_cluster_wait(cluster_id)

        controller_ip = self.fuel_web.get_nailgun_node_by_name(
            'slave-01')['ip']
        os_conn = os_actions.OpenStackActions(
            controller_ip, credentials['user'], credentials['password'],
            credentials['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=1, timeout=300)
        self.fuel_web.check_fixed_network_cidr(
            cluster_id, self.env.get_ssh_to_remote_by_name('slave-01'))

        self.fuel_web.verify_network(cluster_id)

        self.env.verify_network_configuration("slave-01")

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        # is_make=True keeps this snapshot around for dependent tests.
        self.env.make_snapshot("deploy_simple_flat", is_make=True)
    def deploy_stop_reset_on_ha(self):
        """Stop reset cluster in ha mode

        Scenario:
            1. Create cluster
            2. Add 3 node with controller role
            3. Deploy cluster
            4. Stop deployment
            5. Reset settings
            6. Add 2 nodes with compute role
            7. Re-deploy cluster
            8. Run OSTF

        Duration 60m
        Snapshot: deploy_stop_reset_on_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, mode=hlp_data.DEPLOYMENT_MODE_HA)
        controller_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller']
        }
        self.fuel_web.update_nodes(cluster_id, controller_roles)

        # Interrupt the deployment shortly after it starts (~10% progress)
        # and wait for the controllers to come back online.
        self.fuel_web.deploy_cluster_wait_progress(cluster_id, progress=10)
        self.fuel_web.stop_deployment_wait(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])

        compute_roles = {
            'slave-04': ['compute'],
            'slave-05': ['compute']
        }
        self.fuel_web.update_nodes(cluster_id, compute_roles)

        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=16, networks_count=1, timeout=300)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("deploy_stop_reset_on_ha")
# --- Example 11 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def ha_flat_add_compute(self):
        """Add compute node to cluster in HA mode with flat nova-network

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller roles
            3. Add 2 nodes with compute roles
            4. Deploy the cluster
            5. Validate cluster was set up correctly, there are no dead
            services, there are no errors in logs
            6. Add 1 node with compute role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF

        Duration 80m
        Snapshot ha_flat_add_compute

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, mode=DEPLOYMENT_MODE_HA)
        initial_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute']
        }
        self.fuel_web.update_nodes(cluster_id, initial_roles)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=16, networks_count=1, timeout=300)

        # Bring the sixth slave online and add it as an extra compute.
        self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-06': ['compute']}, True, False)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("ha_flat_add_compute")
# --- Example 12 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def dvs_vcenter_shutdown_controller(self):
        """Verify that vmclusters migrate after shutdown controller.

        Scenario:
            1. Revert to 'dvs_destructive_setup_2' snapshot.
            2. Verify connection between instances. Send ping,
               check that ping get reply.
            3. Shutdown controller.
            4. Check that vmclusters migrate to another controller.
            5. Verify connection between instances.
               Send ping, check that ping get reply

        Duration: 1.8 hours

        """
        self.show_step(1)
        self.env.revert_snapshot("dvs_destructive_setup_2")

        cluster_id = self.fuel_web.get_last_created_cluster()
        public_vip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(public_vip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        self.show_step(2)
        # Collect the floating address of every existing instance.
        fips = []
        for srv in os_conn.get_servers():
            fips.append(
                os_conn.get_nova_instance_ip(srv,
                                             net_name=self.inter_net_name,
                                             addrtype='floating'))
        openstack.ping_each_other(fips)

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id=cluster_id, roles=['controller'])

        openstack.check_service(ip=n_ctrls[0]['ip'], commands=self.cmds)
        openstack.ping_each_other(fips)

        self.show_step(3)
        target_node = self.fuel_web.get_devops_node_by_nailgun_node(
            n_ctrls[0])
        self.fuel_web.warm_shutdown_nodes([target_node])

        self.show_step(4)
        # Services must now be answering on a surviving controller.
        openstack.check_service(ip=n_ctrls[1]['ip'], commands=self.cmds)

        self.show_step(5)
        openstack.ping_each_other(fips, timeout=90)
    def ha_one_controller_backup_restore(self):
        """Backup/restore master node with cluster in ha mode

        Scenario:
            1. Revert snapshot "deploy_ha_one_controller_flat"
            2. Backup master
            3. Check backup
            4. Run OSTF
            5. Add 1 node with compute role
            6. Restore master
            7. Check restore
            8. Run OSTF

        Duration 35m

        """
        self.env.revert_snapshot("deploy_ha_one_controller_flat")

        cluster_id = self.fuel_web.get_last_created_cluster()
        # Tenant/user/password match the 'novaSimpleFlat' credentials used
        # when the reverted snapshot's cluster was deployed.
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            'novaSimpleFlat', 'novaSimpleFlat', 'novaSimpleFlat')
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=1, timeout=300)
        self.fuel_web.backup_master(self.env.get_admin_remote())
        checkers.backup_check(self.env.get_admin_remote())

        # Register a pending compute node AFTER the backup was taken; the
        # restore below is expected to discard this pending change.
        self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute']}, True, False)

        # With the pending node the cluster lists 3 nodes.
        assert_equal(
            3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

        self.fuel_web.restore_master(self.env.get_admin_remote())
        checkers.restore_check_sum(self.env.get_admin_remote())
        self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())
        checkers.iptables_check(self.env.get_admin_remote())

        # After restore the pending node must be gone again (back to 2).
        assert_equal(
            2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))

        # Re-add the compute node and deploy it for real this time.
        self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute']}, True, False)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("ha_one_controller_backup_restore")
# --- Example 14 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def check_ceph_cinder_cow(self):
        """Check copy-on-write when Cinder creates a volume from Glance image

        Scenario:
            1. Revert a snapshot where ceph enabled for volumes and images:
                 "ceph_ha_one_controller_compact"
            2. Create a Glance image in RAW disk format
            3. Create a Cinder volume using Glance image in RAW disk format
            4. Check on a ceph-osd node if the volume has a parent image.

        Duration 5m
        """
        self.env.revert_snapshot("ceph_ha_one_controller_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id), 'ceph1', 'ceph1',
            'ceph1')

        # Use the class name as a tiny ASCII payload for the RAW image.
        image_data = BytesIO(
            self.__class__.__name__.encode(encoding='ascii', errors='ignore'))
        image = os_conn.create_image(disk_format='raw',
                                     container_format='bare',
                                     name='test_ceph_cinder_cow',
                                     is_public=True,
                                     data=image_data)
        wait(lambda: os_conn.get_image(image.name).status == 'active',
             timeout=60 * 2,
             timeout_msg='Image is not active')

        volume = os_conn.create_volume(size=1, image_id=image.id)

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            rbd_list = ceph.get_rbd_images_list(remote, 'volumes')

        # A copy-on-write clone is expected to appear in the RBD listing
        # with a 'parent' entry that references the source image.
        for item in rbd_list:
            if volume.id in item['image']:
                assert_true(
                    'parent' in item,
                    "Volume {0} created from image {1} doesn't have"
                    " parents. Copy-on-write feature doesn't work.".format(
                        volume.id, image.id))
                assert_true(
                    image.id in item['parent']['image'],
                    "Volume {0} created from image {1}, but have a "
                    "different image in parent: {2}".format(
                        volume.id, image.id, item['parent']['image']))
                break
        else:
            # for/else: no RBD entry matched the volume id at all.
            raise Exception("Volume {0} not found!".format(volume.id))
    def ha_one_controller_neutron_blocked_vlan(self):
        """Verify network verification with blocked VLANs

        Scenario:
            1. Create cluster in Ha mode
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Validate cluster was set up correctly, there are no dead
               services, there are no errors in logs
            6. Block first VLAN
            7. Run Verify network and assert it fails
            8. Restore first VLAN

        Duration 20m

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        net_settings = {
            "net_provider": 'neutron',
            "net_segment_type": NEUTRON_SEGMENT['vlan']
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=net_settings)
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], 'slave-02': ['compute']})
        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)

        # Start from a clean VLAN state, block the first VLAN and expect
        # the network check to report a failure; always unblock afterwards.
        ebtables = self.env.get_ebtables(
            cluster_id, self.env.d_env.nodes().slaves[:2])
        ebtables.restore_vlans()
        try:
            ebtables.block_first_vlan()
            self.fuel_web.verify_network(cluster_id, success=False)
        finally:
            ebtables.restore_first_vlan()
# --- Example 16 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def deploy_ha_dns_ntp(self):
        """Use external ntp and dns in ha mode

        Scenario:
            1. Create cluster
            2. Configure external NTP,DNS settings
            3. Add 3 nodes with controller roles
            4. Add 2 nodes with compute roles
            5. Deploy the cluster

        """

        self.env.revert_snapshot("ready_with_5_slaves")
        external_dns = settings.EXTERNAL_DNS
        if settings.FUEL_USE_LOCAL_DNS:
            # Append the public router address to the DNS list — presumably
            # it serves DNS for the local environment (TODO confirm).
            public_gw = self.env.d_env.router(router_name="public")
            external_dns += ',' + public_gw

        net_provider_data = {
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': external_dns,
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings=net_provider_data
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(self.fuel_web.
                                              get_public_vip(cluster_id))
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=14)

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        # is_make=True keeps this snapshot around for dependent tests.
        self.env.make_snapshot("deploy_ha_dns_ntp", is_make=True)
# --- Example 17 (page-scrape separator; stray "0" vote-count artifact removed) ---
    def check_ovs_firewall_functionality(self,
                                         cluster_id,
                                         compute_ip,
                                         dpdk=False):
        """Check firewall functionality

        Boots an instance on the given compute and asserts that exactly one
        new interface appears and that the OVS flow table changes, then
        waits for the instance to answer on its floating ip.

        :param cluster_id: int, cluster id
        :param compute_ip: str, compute ip
        :param dpdk: bool, is DPDK enabled
        """
        # Snapshot flows and interfaces BEFORE booting the instance so the
        # deltas below can be attributed to the new server.
        flows = self.get_flows(compute_ip)
        if dpdk:
            ifaces = self.get_ovs_bridge_ifaces(compute_ip)
        else:
            ifaces = self.get_ifaces(compute_ip)
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        if dpdk:
            server = self.boot_dpdk_instance(os_conn, cluster_id)
            current_ifaces = self.get_ovs_bridge_ifaces(compute_ip)
        else:
            server = os_conn.create_server_for_migration(label=net_name)
            current_ifaces = self.get_ifaces(compute_ip)
        current_flows = self.get_flows(compute_ip)
        # Exactly one interface must have been added for the new instance.
        # NOTE(review): assumes get_flows/get_ifaces results expose a
        # .stdout sequence — consistent with usage here; confirm in helpers.
        assert_equal(
            len(current_ifaces.stdout) - len(ifaces.stdout), 1,
            "Check is failed:"
            " {}\n\n{}".format(ifaces, current_ifaces))
        # The flow table must differ from the pre-boot snapshot.
        assert_not_equal(
            set(flows.stdout), set(current_flows.stdout),
            "Check is failed. Passed data is equal:"
            " {}\n\n{}".format(flows, current_flows))
        float_ip = os_conn.assign_floating_ip(server)
        logger.info("Floating address {0} associated with instance {1}".format(
            float_ip.ip, server.id))

        logger.info("Wait for ping from instance {} "
                    "by floating ip".format(server.id))
        # Wait until the instance answers on TCP port 22 via its floating ip.
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(float_ip.ip, 22),
            timeout=300,
            timeout_msg=("Instance {0} is unreachable for {1} seconds".format(
                server.id, 300)))
        os_conn.delete_instance(server)
    def deploy_ha_one_controller_cinder(self):
        """Deploy cluster in HA mode with cinder

        Scenario:
            1. Create cluster in Ha mode with 1 controller
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Add 1 node with cinder role
            5. Deploy the cluster
            6. Validate cluster was set up correctly, there are no dead
            services, there are no errors in logs
            7. Run OSTF

        Duration 30m
        Snapshot: deploy_ha_one_controller_cinder
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=1, timeout=300)

        # Pass the OpenStack client rather than an SSH remote: every other
        # call site of check_fixed_network_cidr in this suite supplies
        # os_conn, and the previously-passed remote was also never closed.
        self.fuel_web.check_fixed_network_cidr(cluster_id, os_conn)
        self.fuel_web.verify_network(cluster_id)
        self.env.verify_network_configuration("slave-01")

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_cinder")
Exemple #19
0
    def __init__(self, obj):
        """Create Test client for run tests.

        :param obj: Test case object
        """
        self.obj = obj
        # Flatten the role lists of every node registered in nailgun.
        roles = []
        for node in self.obj.fuel_web.client.list_nodes():
            roles.extend(node['roles'])
        self.node_roles = roles
        cluster_id = self.obj.fuel_web.get_last_created_cluster()
        # Remember whether Ceph backs cinder volumes in this cluster.
        attrs = self.obj.fuel_web.client.get_cluster_attributes(cluster_id)
        self.volume_ceph = (
            attrs['editable']['storage']['volumes_ceph']['value'])
        public_vip = self.obj.fuel_web.get_public_vip(cluster_id)
        self.os_conn = os_actions.OpenStackActions(
            public_vip, SERVTEST_USERNAME, SERVTEST_PASSWORD, SERVTEST_TENANT)
Exemple #20
0
    def dvs_vcenter_reset_controller(self):
        """Verify that vmclusters migrate after reset controller.

        Scenario:
            1. Revert to 'dvs_destructive_setup_2' snapshot.
            2. Verify connection between instances. Send ping,
               check that ping get reply.
            3. Reset controller.
            4. Check that vmclusters migrate to another controller.
            5. Verify connection between instances. Send ping, check that
               ping get reply.

        Duration: 1.8 hours

        """
        self.show_step(1)
        self.env.revert_snapshot("dvs_destructive_setup_2")

        cluster_id = self.fuel_web.get_last_created_cluster()
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        self.show_step(2)
        # Collect the floating IPs of the instances pre-created by the
        # snapshot and check they can all ping each other before the reset.
        srv_list = os_conn.get_servers()
        fips = [
            os_conn.get_nova_instance_ip(s,
                                         net_name=self.inter_net_name,
                                         addrtype='floating') for s in srv_list
        ]
        openstack.ping_each_other(fips)

        # Target the current primary controller for the cold restart.
        d_ctrl = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        n_ctrl = self.fuel_web.get_nailgun_node_by_devops_node(d_ctrl)

        self.show_step(3)
        self.fuel_web.cold_restart_nodes([d_ctrl], wait_after_destroy=300)

        self.show_step(4)
        # NOTE(review): self.cmds is defined outside this method; it is
        # assumed to probe the vCenter-related services on the restarted
        # controller — confirm against the class definition.
        openstack.check_service(ip=n_ctrl['ip'], commands=self.cmds)

        self.show_step(5)
        # Same connectivity matrix must still hold after the failover.
        openstack.ping_each_other(fips)
    def check_instance_one_gb_page_size_one_gb_host(self):
        """Boot instance with 1 Gb page size on host with only 1 Gb HugePages

        Scenario:
            1. Revert snapshot "check_hugepages_distribution_per_numa"
            2. Boot and validate instance on compute with only 1 Gb pages
        """
        self.env.revert_snapshot("check_hugepages_distribution_per_numa")

        cluster = self.fuel_web.get_last_created_cluster()
        self.os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster))

        # slave-02 is the compute configured with 1 Gb huge pages only;
        # request a 1024 Mb flavor backed by 1 Gb (1048576 KiB) pages.
        self.boot_instance_with_hugepage(
            target_compute_name="slave-02",
            flavor_name="h1.huge.hpgs",
            flavor_ram=1024,
            page_size=1048576)
    def check_instance_two_mb_page_size_mixed_size_host(self):
        """Boot instance with 2 Mb page size on host with both HugePages types

        Scenario:
            1. Revert snapshot "check_hugepages_distribution_per_numa"
            2. Boot and validate instance on compute with both pages types
        """
        self.env.revert_snapshot("check_hugepages_distribution_per_numa")

        cluster = self.fuel_web.get_last_created_cluster()
        self.os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster))

        # slave-01 carries both 2 Mb and 1 Gb huge pages; ask for a small
        # flavor backed by 2 Mb (2048 KiB) pages.
        self.boot_instance_with_hugepage(
            target_compute_name="slave-01",
            flavor_name="h1.small_mixed.hpgs",
            flavor_ram=128,
            page_size=2048)
Exemple #23
0
    def configure_openstack(self):
        """Prepare the deployed OpenStack for test runs.

        1. Fetch id of TestVM image
        2. Fetch id of neutron public network and public router
        3. Create non-admin user for keystone
        """
        opts = self.options
        client = os_actions.OpenStackActions(
            opts['management_vip'],
            user=opts['admin_username'],
            passwd=opts['admin_password'],
            tenant=opts['admin_tenant_name'])

        self._configure_openstack_keystone(client)
        self._configure_openstack_glance(client)
        # Network preparation differs by the deployed network provider.
        if opts['net_provider'] == 'neutron':
            self._configure_openstack_neutron(client)
        else:
            self._configure_nova_network(client)
Exemple #24
0
    def simple_flat_blocked_vlan(self):
        """Verify network verification with blocked VLANs

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Validate cluster was set up correctly, there are no dead
            services, there are no errors in logs
            6. Block first VLAN
            7. Run Verify network and assert it fails
            8. Restore first VLAN

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE)
        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller'], 'slave-02': ['compute']})
        self.fuel_web.deploy_cluster_wait(cluster_id)

        controller_ip = self.fuel_web.get_nailgun_node_by_name(
            'slave-01')['ip']
        os_conn = os_actions.OpenStackActions(controller_ip)
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=1, timeout=300)

        ebtables = self.env.get_ebtables(
            cluster_id, self.env.nodes().slaves[:2])
        # Start from a clean state before deliberately breaking a VLAN.
        ebtables.restore_vlans()
        try:
            ebtables.block_first_vlan()
            # With the first VLAN blocked, verification is expected to fail.
            self.fuel_web.verify_network(cluster_id, success=False)
        finally:
            # Always undo the breakage, even if the verification step raised.
            ebtables.restore_first_vlan()
    def deploy_one_node(self):
        """Deploy cluster with controller node only

        Scenario:
            1. Create cluster
            2. Add 1 node with controller role
            3. Deploy the cluster
            4. Validate cluster was set up correctly, there are no dead
               services, there are no errors in logs

        Duration 20m

        """
        self.env.revert_snapshot("ready")
        self.fuel_web.client.get_root()
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:1])

        net_settings = {
            "net_provider": 'neutron',
            "net_segment_type": NEUTRON_SEGMENT_TYPE
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=net_settings)
        logger.info('cluster is %s' % str(cluster_id))

        self.fuel_web.update_nodes(cluster_id, {'slave-01': ['controller']})
        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        # A one-controller cluster exposes 4 "smiles" (healthy services).
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=4)

        # Only the identity sanity check is meaningful on a bare controller.
        self.fuel_web.run_single_ostf_test(
            cluster_id=cluster_id,
            test_sets=['sanity'],
            test_name=('fuel_health.tests.sanity.test_sanity_identity'
                       '.SanityIdentityTest.test_list_users'))
    def deploy_ha_ceph(self):
        """Deploy an HA cluster with Ceph backing volumes and images.

        Scenario:
            1. Revert the "ready_with_5_slaves" snapshot
            2. Create cluster with Ceph volumes/images and LVM disabled
            3. Add slave-01 as controller+ceph-osd, slave-02/03 as
               controllers, slave-04 as compute+ceph-osd, slave-05 as compute
            4. Deploy the cluster and verify networks
            5. Check public connectivity from every controller
            6. Make a reusable snapshot for dependent tests
        """
        self.check_run(self.snapshot_name)
        self.env.revert_snapshot("ready_with_5_slaves")

        # Renamed from `settings`: that name shadowed the module-level
        # `settings` import used by other tests in this file.
        cluster_settings = {
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            "net_provider": 'neutron',
            "net_segment_type": NEUTRON_SEGMENT_TYPE
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=cluster_settings)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        public_vip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(public_vip)
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=14)
        self.fuel_web.verify_network(cluster_id)

        # `range` replaces the Python-2-only `xrange`; slaves 1-3 are the
        # controllers whose public connectivity is checked.
        for node in ['slave-0{0}'.format(slave) for slave in range(1, 4)]:
            with self.fuel_web.get_ssh_for_node(node) as remote:
                check_public_ping(remote)

        self.env.make_snapshot(self.snapshot_name, is_make=True)
    def power_outage_cinder_cluster(self):
        """Power outage of Neutron vlan, cinder/swift cluster

        Scenario:
            1. Pre-condition - do steps from 'deploy_ha_cinder' test
            2. Create 2 instances
            3. Create 2 volumes
            4. Attach volumes to instances
            5. Fill cinder storage up to 30%
            6. Cold shutdown of all nodes
            7. Wait 5 min
            8. Start of all nodes
            9. Wait for HA services ready
            10. Verify networks
            11. Run OSTF tests

        Duration: 30 min
        """

        self.show_step(1, initialize=True)
        self.env.revert_snapshot('deploy_ha_cinder')
        cluster_id = self.fuel_web.get_last_created_cluster()

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id), 'failover', 'failover',
            'failover')
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        # First instance/volume pair.
        server = os_conn.create_instance(
            neutron_network=True, label=net_name)
        volume = os_conn.create_volume()
        os_conn.attach_volume(volume, server)
        # Second instance/volume pair.
        server = os_conn.create_instance(
            flavor_name='test_flavor1',
            server_name='test_instance1',
            neutron_network=True, label=net_name)
        vol = os_conn.create_volume()
        os_conn.attach_volume(vol, server)

        self.show_step(5)
        with self.fuel_web.get_ssh_for_node('slave-04') as remote:
            # Allocate a 20G logical volume in the 'cinder' volume group to
            # fill the storage.  The previous assertion message wrongly
            # claimed a "file" was not allocated; lvcreate creates an LV.
            result = remote.execute(
                'lvcreate -n test -L20G cinder')['exit_code']
            assert_equal(result, 0,
                         "Failed to create logical volume 'test' in "
                         "volume group 'cinder'")

        self.show_step(6)
        self.show_step(7)
        self.show_step(8)
        self.fuel_web.cold_restart_nodes(
            self.env.d_env.get_nodes(name__in=[
                'slave-01',
                'slave-02',
                'slave-03',
                'slave-04',
                'slave-05']), wait_after_destroy=300)

        self.show_step(9)
        self.fuel_web.assert_ha_services_ready(cluster_id)

        self.show_step(10)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(11)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
Exemple #28
0
    def compute_stop_reinstallation(self):
        """Verify stop reinstallation of compute.

        Scenario:
            1. Revert the snapshot
            2. Create an OS volume and OS instance
            3. Mark 'cinder' and 'vm' partitions to be preserved
            4. Stop reinstallation process of compute
            5. Start the reinstallation process again
            6. Run network verification
            7. Run OSTF
            8. Verify that the volume is present and has 'available' status
               after the node reinstallation
            9. Verify that the VM is available and pingable
               after the node reinstallation

        Duration: 115m

        """
        self.env.revert_snapshot("node_reinstallation_env")

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Create an OS volume
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        volume = os_conn.create_volume()

        # Create an OS instance
        # Pin the instance to the first hypervisor so we know exactly which
        # compute node is being reinstalled below.
        cmp_host = os_conn.get_hypervisors()[0]

        net_label = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']

        vm = os_conn.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(cmp_host.hypervisor_hostname),
            label=net_label)
        vm_floating_ip = os_conn.assign_floating_ip(vm)
        # Wait until the VM accepts connections on port 22 before proceeding.
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(vm_floating_ip.ip, 22),
            timeout=120)

        cmp_nailgun = self.fuel_web.get_nailgun_node_by_fqdn(
            cmp_host.hypervisor_hostname)

        # Mark 'cinder' and 'vm' partitions to be preserved
        # so the volume and instance survive the reinstallation.
        with self.env.d_env.get_admin_remote() as remote:
            PartitionPreservation._preserve_partition(remote,
                                                      cmp_nailgun['id'],
                                                      "cinder")
            PartitionPreservation._preserve_partition(remote,
                                                      cmp_nailgun['id'], "vm")

        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            slave_nodes)

        logger.info('Stop reinstallation process')
        # Interrupt the reinstallation mid-flight (the scenario under test),
        # then restart it from scratch.
        self._stop_reinstallation(self.fuel_web, cluster_id,
                                  [str(cmp_nailgun['id'])], devops_nodes)

        self.fuel_web.verify_network(cluster_id)
        logger.info('Start the reinstallation process again')
        NodeReinstallationEnv._reinstall_nodes(self.fuel_web, cluster_id,
                                               [str(cmp_nailgun['id'])])

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        # Verify that the created volume is still available
        # NOTE(review): NotFound is assumed to be the cinderclient exception
        # imported at file top — confirm.
        try:
            volume = os_conn.cinder.volumes.get(volume.id)
        except NotFound:
            raise AssertionError(
                "{0} volume is not available after its {1} hosting node "
                "reinstallation".format(volume.id, cmp_nailgun['fqdn']))
        expected_status = "available"
        assert_equal(
            expected_status, volume.status,
            "{0} volume status is {1} after its {2} hosting node "
            "reinstallation. Expected status is {3}.".format(
                volume.id, volume.status, cmp_nailgun['fqdn'],
                expected_status))

        # Verify that the VM is still available
        try:
            os_conn.verify_instance_status(vm, 'ACTIVE')
        except AssertionError:
            raise AssertionError(
                "{0} VM is not available after its {1} hosting node "
                "reinstallation".format(vm.name, cmp_host.hypervisor_hostname))
        assert_true(
            devops_helpers.tcp_ping(vm_floating_ip.ip, 22),
            "{0} VM is not accessible via its {1} floating "
            "ip".format(vm.name, vm_floating_ip))
Exemple #29
0
    def huge_ha_neutron_vlan_ceph_ceilometer_rados(self):
        """Deploy cluster in HA mode with Neutron VLAN, RadosGW

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller and ceph role
            3. Add 3 nodes with compute and ceph roles
            4. Add 3 nodes with mongo roles
            5. Deploy the cluster
            6. Verify smiles count
            7. Run OSTF

        Duration 100m

        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # The literal previously contained 'ceilometer': True twice; a
        # duplicate dict key silently overwrites the first occurrence, so
        # the duplicate was removed.
        data = {
            'ceilometer': True,
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            'objects_ceph': True,
            'net_provider': 'neutron',
            'net_segment_type': 'vlan',
            'tenant': 'haVlanCephHugeScale',
            'user': '******',
            'password': '******'
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
                'slave-06': ['compute', 'ceph-osd'],
                'slave-07': ['mongo'],
                'slave-08': ['mongo'],
                'slave-09': ['mongo']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id), data['user'],
            data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(os_conn,
                                           smiles_count=15,
                                           networks_count=2,
                                           timeout=300)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        # Additionally run the two Ceilometer platform tests individually
        # with an extended per-test timeout.
        test_class_main = ('fuel_health.tests.platform_tests.'
                           'test_ceilometer.'
                           'CeilometerApiPlatformTests')
        tests_names = ['test_check_alarm_state', 'test_create_sample']
        test_classes = [
            '{0}.{1}'.format(test_class_main, test_name)
            for test_name in tests_names
        ]
        for test_name in test_classes:
            self.fuel_web.run_single_ostf_test(cluster_id=cluster_id,
                                               test_sets=['platform_tests'],
                                               test_name=test_name,
                                               timeout=60 * 20)
Exemple #30
0
    def deploy_neutron_vlan_ha(self):
        """Deploy cluster in HA mode with Neutron VLAN

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 2 nodes with compute role
            4. Deploy the cluster
            5. Run network verification
            6. Run OSTF

        Duration 80m
        Snapshot deploy_neutron_vlan_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['vlan']
            }
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute']
            }
        )
        # Use a non-default internal network to exercise CIDR reconfiguration.
        self.fuel_web.update_internal_network(cluster_id, '192.168.196.0/22',
                                              '192.168.196.1')
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        # assert_equal(str(cluster['net_segment_type']), segment_type)
        self.fuel_web.check_fixed_network_cidr(
            cluster_id, os_conn)

        self.fuel_web.verify_network(cluster_id)
        devops_node = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        logger.debug("devops node name is {0}".format(devops_node.name))
        _ip = self.fuel_web.get_nailgun_node_by_name(devops_node.name)['ip']
        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
            # Swift rings may be temporarily unbalanced right after deploy:
            # retry the check up to 5 times, rebalancing between attempts.
            for i in range(5):
                try:
                    checkers.check_swift_ring(remote)
                    break
                except AssertionError:
                    result = remote.execute(
                        "/usr/local/bin/swift-rings-rebalance.sh")
                    logger.debug("command execution result is {0}"
                                 .format(result))
            else:
                # for/else: loop never hit `break`, so every attempt failed;
                # run the check once more to surface the AssertionError.
                checkers.check_swift_ring(remote)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("deploy_neutron_vlan_ha")