# Example #1
    def controller_ceph_and_compute_cinder(self):
        """Deployment with 3 Controllers plus Ceph, Neutron Vxlan
           and non-default disks partition

        Scenario:
            1. Create new environment
            2. Choose Neutron Vxlan
            3. Choose Cinder for volumes and Ceph for images
            4. Add 3 controller+ceph
            5. Add 1 compute+cinder
            6. Verify networks
            7. Change disk configuration for all Ceph nodes.
               Change 'Ceph' volume for vdc
            8. Deploy the environment
            9. Verify networks
            10. Run OSTF tests

        Duration 180m
        Snapshot controller_ceph_and_compute_cinder
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # LVM-backed volumes with Ceph-backed Glance images, Neutron VXLAN.
        cluster_settings = {
            'volumes_lvm': True,
            'images_ceph': True,
            'tenant': 'controllercephcomputecinder',
            'user': '******',
            'password': '******',
            'net_provider': 'neutron',
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
        }
        self.show_step(1, initialize=True)
        self.show_step(2)
        self.show_step(3)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)

        self.show_step(4)
        self.show_step(5)
        node_roles = {
            'slave-01': ['controller', 'ceph-osd'],
            'slave-02': ['controller', 'ceph-osd'],
            'slave-03': ['controller', 'ceph-osd'],
            'slave-04': ['compute', 'cinder'],
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)

        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(7)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd_node in ceph_nodes:
            # Every OSD node gets the same layout, so keeping the last
            # returned size is sufficient for the post-deploy check below.
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.show_step(8)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        for osd_node in ceph_nodes:
            checkers.check_ceph_image_size(osd_node['ip'], ceph_image_size)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("controller_ceph_and_compute_cinder")
    def ceph_for_volumes_images_ephemeral(self):
        """Deployment with 5 controllers, NeutronVLAN,
           with Ceph for volumes and images, ephemeral

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose Ceph for volumes and images, ceph for ephemeral
            4. Add 5 controller
            5. Add 2 compute
            6. Add 2 ceph nodes
            7. Change ceph replication factor to 2
            8. Change management net default mask from /24 to /25
            9. Change default disk partitioning for ceph nodes for vdc
            10. Verify networks
            11. Deploy changes
            12. Verify networks
            13. Run OSTF

        Duration 180m
        Snapshot ceph_for_volumes_images_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # Ceph backs volumes, images and ephemeral; replication factor 2.
        env_settings = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'osd_pool_size': '2',
            'tenant': 'cephforvolumesimagesephemeral',
            'user': '******',
            'password': '******',
        }
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=env_settings)
        for step in range(2, 8):
            self.show_step(step)
        node_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['controller'],
            'slave-05': ['controller'],
            'slave-06': ['compute'],
            'slave-07': ['compute'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['ceph-osd'],
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)
        self.show_step(8)
        self.fuel_web.update_network_cidr(cluster_id, 'management')

        self.show_step(9)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.show_step(10)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(11)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        for osd_node in ceph_nodes:
            checkers.check_ceph_image_size(osd_node['ip'], ceph_image_size)

        self.show_step(13)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_for_volumes_images_ephemeral")
# Example #3
    def cinder_ceph_for_images(self):
        """Deploy cluster with cinder and ceph for images

        Scenario:
            1. Create cluster
            2. Add 3 node with controller role
            3. Add 2 node with compute role
            4. Add 3 nodes with ceph OSD roles
            5. Add 1 node with cinder
            6. Change disks configuration for ceph nodes
            7. Verify networks
            8. Deploy the cluster
            9. Verify networks
            10. Run OSTF

        Duration 180m
        Snapshot cinder_ceph_for_images
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # LVM-backed Cinder for volumes; Ceph used only for Glance images.
        cluster_settings = {
            'volumes_lvm': True,
            'volumes_ceph': False,
            'images_ceph': True,
            'tenant': 'cindercephforimages',
            'user': '******',
            'password': '******',
            'net_provider': 'neutron',
            'net_segment_type': settings.NEUTRON_SEGMENT['vlan'],
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)

        node_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['cinder'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['ceph-osd'],
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)
        self.fuel_web.verify_network(cluster_id)

        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # Partitioning is validated over ssh on each deployed ceph node.
        for devops_node in d_ceph:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                checkers.check_ceph_image_size(remote, ceph_image_size)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("cinder_ceph_for_images")
# Example #4
    def tun_no_volumes_ceph_for_images_and_ephemeral(self):
        """Deployment with 3 controllers, NeutronVxLAN,
           with no storage for volumes and ceph for images and ephemeral

        Scenario:
            1. Create new environment
            2. Choose Neutron, VxLAN
            3. Uncheck cinder for volumes and choose ceph for images,
               ceph for ephemeral
            4. Change ceph replication factor to 2
            5. Add 3 controller
            6. Add 2 compute
            7. Add 2 ceph nodes
            8. Change default disks partitioning for ceph nodes for 'vdc'
            9. Change default dns server to any 2 public dns servers
               to the 'Host OS DNS Servers' on Settings tab
            10. Change default ntp servers to any 2 public ntp servers
                to the 'Host OS NTP Servers' on Settings tab
            11. Change default public net mask from /24 to /25
            12. Verify networks
            13. Deploy cluster
            14. Verify networks
            15. Run OSTF

        Duration 180m
        Snapshot tun_no_volumes_ceph_for_images_and_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # Warn (do not fail) when fewer than two external servers are set.
        if len(settings.EXTERNAL_DNS) < 2:
            logging.warning("Less than 2 DNS servers was configured!")
        if len(settings.EXTERNAL_NTP) < 2:
            logging.warning("Less than 2 NTP servers was configured!")

        env_settings = {
            'tenant': 'TunNoVolumesCeph',
            'user': '******',
            'password': '******',
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'dns_list': settings.EXTERNAL_DNS,
            'ntp_list': settings.EXTERNAL_NTP,
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'objects_ceph': False,
            'ephemeral_ceph': True,
            'osd_pool_size': '2',
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=env_settings)
        node_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)

        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for osd_node in ceph_nodes:
            checkers.check_ceph_image_size(osd_node['ip'], ceph_image_size)

        # DNS / NTP resolution is validated on every controller.
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, roles=['controller'])
        vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id)
        for ctrl in controllers:
            checkers.external_dns_check(ctrl['ip'])
            checkers.external_ntp_check(ctrl['ip'], vrouter_vip)

        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_no_volumes_ceph_for_images_and_ephemeral")
# Example #5
    def ceph_for_volumes_images_ephemeral(self):
        """Deployment with 5 controllers, NeutronVLAN,
           with Ceph for volumes and images, ephemeral

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose Ceph for volumes and images, ceph for ephemeral
            4. Add 5 controller
            5. Add 2 compute
            6. Add 2 ceph nodes
            7. Change ceph replication factor to 2
            8. Change management net default mask from /24 to /25
            9. Change default disk partitioning for ceph nodes for vdc
            10. Verify networks
            11. Deploy changes
            12. Verify networks
            13. Run OSTF

        Duration 180m
        Snapshot ceph_for_volumes_images_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # All storage (volumes/images/ephemeral) on Ceph, pool size 2.
        env_settings = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'osd_pool_size': "2",
            'tenant': 'cephforvolumesimagesephemeral',
            'user': '******',
            'password': '******',
        }
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=env_settings)
        for step in range(2, 8):
            self.show_step(step)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['compute'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd'],
            })
        self.show_step(8)
        self.fuel_web.update_network_cidr(cluster_id, 'management')

        self.show_step(9)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.show_step(10)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(11)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        for osd_node in ceph_nodes:
            checkers.check_ceph_image_size(osd_node['ip'], ceph_image_size)

        self.show_step(13)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_for_volumes_images_ephemeral")
# Example #6
    def cinder_ceph_for_images_ephemeral_rados(self):
        """Deployment with 3 controllers, NeutronVLAN, with cinder for volumes
           and ceph for images, ephemeral and Rados GW for objects

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose cinder for volumes and ceph for images, ceph for
               ephemeral and Rados GW for objects
            4. Add 3 controller
            5. Add 2 compute
            6. Add 3 ceph nodes
            7. Add 1 cinder node
            8. Change default public net mask from /24 to /25
            9. Change default partitioning for ceph and cinder nodes for vdc
            10. Change default dns server to any 2 public dns servers to the
                'Host OS DNS Servers' on Settings tab
            11. Change default ntp servers to any 2 public ntp servers to the
                'Host OS NTP Servers' on Settings tab
            12. Verify networks
            13. Deploy cluster
            14. Verify networks
            15. Run OSTF

        Duration 180m
        Snapshot cinder_ceph_for_images_ephemeral_rados
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # LVM cinder volumes; ceph backs images, ephemeral and RadosGW.
        env_settings = {
            'volumes_lvm': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'tenant': 'cindercephforimagesephemeralrados',
            'user': '******',
            'password': '******',
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': settings.EXTERNAL_DNS,
        }
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=env_settings)
        for step in range(2, 8):
            self.show_step(step)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['ceph-osd'],
                'slave-07': ['ceph-osd'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['cinder'],
            })
        self.show_step(8)
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.show_step(9)
        self.show_step(10)
        self.show_step(11)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['cinder'], role_status='pending_roles')
        for vol_node in cinder_nodes:
            cinder_image_size = self.fuel_web.update_node_partitioning(
                vol_node, node_role='cinder')

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(13)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(14)
        self.fuel_web.verify_network(cluster_id)

        for osd_node in ceph_nodes:
            checkers.check_ceph_image_size(osd_node['ip'], ceph_image_size)
        for vol_node in cinder_nodes:
            checkers.check_cinder_image_size(vol_node['ip'],
                                             cinder_image_size)

        self.show_step(15)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")
# Example #7
    def cinder_ceph_for_images_ephemeral_rados(self):
        """Deploy cluster with cinder volumes and ceph for images,
           ephemeral, rados

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 2 nodes with compute role
            4. Add 3 nodes with ceph OSD roles
            5. Add 1 cinder node
            6. Change disks configuration for ceph and cinder nodes
            7. Change default dns server
            8. Change default NTP server
            9. Change public net mask from /24 to /25
            10. Verify networks
            11. Deploy the cluster
            12. Check ceph status
            13. Verify networks
            14. Check ceph disks partitioning
            15. Run OSTF

        Duration 180m
        Snapshot cinder_ceph_for_images_ephemeral_rados
        """

        self.env.revert_snapshot("ready_with_9_slaves")

        # LVM cinder volumes; ceph backs images, ephemeral and RadosGW
        # objects; external DNS/NTP servers are applied at creation time.
        data = {
            'volumes_lvm': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'tenant': 'cindercephforimagesephemeralrados',
            'user': '******',
            'password': '******',
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': settings.EXTERNAL_DNS
        }
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  settings=data)
        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        self.show_step(5)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['ceph-osd'],
                'slave-07': ['ceph-osd'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['cinder']
            })
        self.show_step(9)
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.show_step(6)
        self.show_step(7)
        self.show_step(8)
        ceph_nodes = self.fuel_web.\
            get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
                                               role_status='pending_roles')
        d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
        for ceph_node in ceph_nodes:
            # All ceph nodes share one layout; the last size is used below.
            ceph_image_size = self.fuel_web.\
                update_node_partitioning(ceph_node, node_role='ceph')

        cinder_nodes = self.fuel_web.\
            get_nailgun_cluster_nodes_by_roles(cluster_id, ['cinder'],
                                               role_status='pending_roles')
        # BUG FIX: this previously resolved devops nodes from ceph_nodes,
        # so the cinder partition check below ran against ceph nodes
        # instead of the cinder node.
        d_cinder = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            cinder_nodes)
        for cinder_node in cinder_nodes:
            cinder_image_size = self.fuel_web.\
                update_node_partitioning(cinder_node, node_role='cinder')

        self.show_step(10)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(11)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(12)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(13)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(14)
        # Post-deploy: validate the custom partitioning over ssh.
        for devops_ceph in d_ceph:
            with self.fuel_web.get_ssh_for_node(devops_ceph.name) as remote:
                checkers.check_ceph_image_size(remote, ceph_image_size)

        for devops_cinder in d_cinder:
            with self.fuel_web.get_ssh_for_node(devops_cinder.name) as remote:
                checkers.check_cinder_image_size(remote, cinder_image_size)

        self.show_step(15)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")
# Example #8
    def controller_ceph_and_compute_cinder(self):
        """Deploy cluster with controller+ceph and compute+cinder

        Scenario:
            1. Create cluster
            2. Choose cinder and ceph for images
            3. Add 3 node with controller+ceph role
            4. Add 1 node with compute+cinder role
            5. Change disks configuration for ceph nodes
            6. Verify networks
            7. Deploy the cluster
            8. Check ceph status
            9. Verify networks
            10. Check partitions on ceph nodes
            11. Run OSTF

        Duration 180m
        Snapshot controller_ceph_and_compute_cinder
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        cluster_settings = {
            'volumes_lvm': True,
            'images_ceph': True,
            'tenant': 'controllercephcomputecinder',
            'user': '******',
            'password': '******',
            'net_provider': 'neutron',
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
        }
        self.show_step(1, initialize=True)
        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)
        self.show_step(3)
        self.show_step(4)
        node_roles = {
            'slave-01': ['controller', 'ceph-osd'],
            'slave-02': ['controller', 'ceph-osd'],
            'slave-03': ['controller', 'ceph-osd'],
            'slave-04': ['compute', 'cinder'],
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)
        # Scenario step 6 (network verification) is executed before step 5's
        # disk reconfiguration, preserving the original step ordering.
        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(5)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(8)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(10)
        for devops_node in d_ceph:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                checkers.check_ceph_image_size(remote, ceph_image_size)

        self.show_step(11)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("controller_ceph_and_compute_cinder")
# Example #9
    def tun_no_volumes_ceph_for_images_and_ephemeral(self):
        """Deployment with 3 controllers, NeutronVxLAN, with no storage for
        volumes and Ceph for images and ephemeral

        Scenario:
            1. Create cluster using NeutronTUN provider, external dns and ntp
               servers, no storage for volumes, Ceph for Images and ephemeral,
               Ceph replica factor 2
            2. Add 3 nodes with controller role
            3. Add 2 nodes with compute role
            4. Add 2 nodes with ceph OSD role
            5. Change default partitioning for vdc of Ceph node
            6. Change public network from /24 to /25
            7. Verify networks
            8. Deploy the cluster
            9. Validate partition on Ceph node
            10. Verify networks
            11. Run OSTF

        Duration 180m
        Snapshot tun_no_volumes_ceph_for_images_and_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # EXTERNAL_DNS / EXTERNAL_NTP are comma-separated strings here;
        # warn (do not fail) when fewer than two servers are configured.
        if len(settings.EXTERNAL_DNS.split(',')) < 2:
            logging.warning("Less than 2 DNS servers was configured!")
        if len(settings.EXTERNAL_NTP.split(',')) < 2:
            logging.warning("Less than 2 NTP servers was configured!")

        env_settings = {
            'tenant': 'TunNoVolumesCeph',
            'user': '******',
            'password': '******',
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'dns_list': settings.EXTERNAL_DNS,
            'ntp_list': settings.EXTERNAL_NTP,
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'objects_ceph': False,
            'ephemeral_ceph': True,
            'osd_pool_size': '2',
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=env_settings)
        node_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)

        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for devops_node in d_ceph:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                checkers.check_ceph_image_size(remote, ceph_image_size)

        # DNS / NTP resolution is validated over ssh on each controller.
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, roles=['controller'])
        vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id)
        for ctrl in controllers:
            with self.fuel_web.get_ssh_for_nailgun_node(ctrl) as remote:
                checkers.external_dns_check(remote)
                checkers.external_ntp_check(remote, vrouter_vip)

        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_no_volumes_ceph_for_images_and_ephemeral")
# Example #10
    def ceph_for_volumes_images_ephemeral(self):
        """Deploy cluster with ceph for volumes and images, ephemeral

        Scenario:
            1. Create cluster
            2. Add 5 node with controller role
            3. Add 2 node with compute role
            4. Add 2 nodes with ceph OSD roles
            5. Change disks configuration for ceph nodes
            6. Change management net mask from /24 to /25
            7. Verify networks
            8. Deploy the cluster
            9. Check ceph status
            10. Verify networks
            11. Check partitioning for ceph nodes
            12. Run OSTF

        Duration 180m
        Snapshot ceph_for_volumes_images_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # Ceph backs volumes, images and ephemeral; pool size 2.
        cluster_settings = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'osd_pool_size': "2",
            'tenant': 'cephforvolumesimagesephemeral',
            'user': '******',
            'password': '******',
        }
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)
        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        node_roles = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['controller'],
            'slave-05': ['controller'],
            'slave-06': ['compute'],
            'slave-07': ['compute'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['ceph-osd'],
        }
        self.fuel_web.update_nodes(cluster_id, node_roles)
        # Scenario step 6 (CIDR change) runs before step 5's disk change,
        # preserving the original step ordering.
        self.show_step(6)
        self.fuel_web.update_network_cidr(cluster_id, 'management')

        self.show_step(5)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
        for osd_node in ceph_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(8)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(9)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(10)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(11)
        for devops_node in d_ceph:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                checkers.check_ceph_image_size(remote, ceph_image_size)

        self.show_step(12)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_for_volumes_images_ephemeral")
예제 #11
0
    def tun_5_ctrl_ceph_ephemeral(self):
        """Deployment with 5 controllers, NeutronTUN, with Ceph RDB for
        ephemeral volumes

        Scenario:
            1. Create cluster using NeutronTUN provider, Ceph RDB for ephemeral
               volumes
            2. Add 5 nodes with controller role
            3. Add 1 nodes with compute role
            4. Add 3 nodes with ceph OSD role
            5. Change default partitioning for vdc of Ceph nodes
            6. Change public network mask from /24 to /25
            7. Verify networks
            8. Deploy the cluster
            9. Validate partition on Ceph node
            10. Verify networks
            11. Run OSTF

        Duration XXXm
        Snapshot tun_5_ctrl_ceph_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # NeutronTUN segmentation; Cinder LVM disabled, ephemeral volumes
        # backed by Ceph RBD instead.
        cluster_settings = {
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],

            'tenant': 'TunCephEphemeral',
            'user': '******',
            'password': '******',

            'volumes_lvm': False,
            'ephemeral_ceph': True,
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)

        # 5 controllers, 3 Ceph OSDs and a single compute node.
        topology = {name: ['controller']
                    for name in ('slave-01', 'slave-02', 'slave-03',
                                 'slave-04', 'slave-05')}
        topology.update({name: ['ceph-osd']
                         for name in ('slave-06', 'slave-07', 'slave-08')})
        topology['slave-09'] = ['compute']
        self.fuel_web.update_nodes(cluster_id, topology)

        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        osd_devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            osd_nodes)
        # Repartition 'vdc' on every OSD node; remember the resulting image
        # size so it can be validated after deployment.
        for osd_node in osd_nodes:
            image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        # Shrink the public network from /24 to /25.
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Validate the partition change on every deployed OSD node.
        for devops_node in osd_devops_nodes:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                # TODO: add pool size check
                checkers.check_ceph_image_size(remote, image_size)

        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_5_ctrl_ceph_ephemeral")
예제 #12
0
    def controller_ceph_and_compute_cinder(self):
        """Deploy cluster with controller+ceph and compute+cinder

        Scenario:
            1. Create cluster
            2. Choose cinder and ceph for images
            3. Add 3 node with controller+ceph role
            4. Add 1 node with compute+cinder role
            5. Change disks configuration for ceph nodes
            6. Verify networks
            7. Deploy the cluster
            8. Check ceph status
            9. Verify networks
            10. Check partitions on ceph nodes
            11. Run OSTF

        Duration 180m
        Snapshot controller_ceph_and_compute_cinder
        """

        self.env.revert_snapshot("ready_with_5_slaves")

        # LVM for volumes, Ceph for images, Neutron VXLAN segmentation.
        cluster_settings = {
            'volumes_lvm': True,
            'images_ceph': True,
            'tenant': 'controllercephcomputecinder',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }
        self.show_step(1, initialize=True)
        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)
        self.show_step(3)
        self.show_step(4)
        topology = {name: ['controller', 'ceph-osd']
                    for name in ('slave-01', 'slave-02', 'slave-03')}
        topology['slave-04'] = ['compute', 'cinder']
        self.fuel_web.update_nodes(cluster_id, topology)

        # Steps 5 and 6 are reported out of numeric order on purpose: the
        # network check runs before the disks are repartitioned.
        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(5)
        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        osd_devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            osd_nodes)
        # Repartition 'vdc' on every OSD node; keep the resulting image size
        # for the post-deployment check.
        for osd_node in osd_nodes:
            image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(8)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(10)

        for devops_node in osd_devops_nodes:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                checkers.check_ceph_image_size(remote, image_size)

        self.show_step(11)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("controller_ceph_and_compute_cinder")
예제 #13
0
    def ceph_for_images(self):
        """Deployment with 3 controllers, NeutronVLAN,
           with no storage for volumes and ceph for images

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Uncheck cinder for volumes and choose ceph for images
            4. Add 3 controller
            5. Add 2 compute
            6. Add 3 ceph nodes
            7. Change default disks partitioning for ceph nodes for 'vdc'
            8. Change default dns server to any 2 public dns servers to the
               'Host OS DNS Servers' on Settings tab
            9. Change default ntp servers to any 2 public ntp servers to the
               'Host OS NTP Servers' on Settings tab
            10. Untag management and storage networks
                and move them to separate interfaces
            11. Verify networks
            12. Deploy cluster
            13. Verify networks
            14. Run OSTF

        Duration 180m
        Snapshot ceph_for_images
        """

        self.env.revert_snapshot("ready_with_9_slaves")

        # No Cinder backends at all; Ceph keeps the Glance images with a
        # replication factor of 3.  External DNS/NTP replace the defaults.
        cluster_settings = {
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'osd_pool_size': "3",
            'tenant': 'cephforimages',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': settings.EXTERNAL_DNS
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)

        role_map = (
            ('slave-01', 'controller'),
            ('slave-02', 'controller'),
            ('slave-03', 'controller'),
            ('slave-04', 'compute'),
            ('slave-05', 'compute'),
            ('slave-06', 'ceph-osd'),
            ('slave-07', 'ceph-osd'),
            ('slave-08', 'ceph-osd'),
        )
        self.fuel_web.update_nodes(
            cluster_id, {node: [role] for node, role in role_map})
        self.fuel_web.verify_network(cluster_id)

        # Repartition 'vdc' on the pending OSD nodes and remember the
        # expected image size for the post-deployment validation.
        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd_node in osd_nodes:
            image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for osd_node in osd_nodes:
            checkers.check_ceph_image_size(osd_node['ip'], image_size)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_for_images")
    def ceph_for_images_ephemeral_rados(self):
        """Deployment with 3 controllers, NeutronVLAN, with no storage for
           volumes and ceph for images, ephemeral and Rados GW for objects

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Uncheck cinder for volumes and choose ceph for images,
               ceph for ephemeral and Rados GW for objects
            4. Add 3 controller
            5. Add 2 compute
            6. Add 3 ceph nodes
            7. Verify networks
            8. Change default disks partitioning for ceph nodes for 'vdc'
            9. Change default dns server to any 2 public dns servers to the
               'Host OS DNS Servers' on Settings tab
            10. Change default ntp servers to any 2 public ntp servers to the
                'Host OS NTP Servers' on Settings tab
            11. Deploy cluster
            12. Verify networks
            13. Run OSTF

        Duration 180m
        Snapshot ceph_for_images_ephemeral_rados
        """

        self.env.revert_snapshot("ready_with_9_slaves")

        # Ceph serves images, ephemeral volumes and (via RadosGW) objects;
        # both Cinder backends stay disabled.
        cluster_settings = {
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'tenant': 'cephforimagesephemeralrados',
            'user': '******',
            'password': '******',
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': settings.EXTERNAL_DNS,
        }
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)
        for step in (2, 3, 4, 5, 6):
            self.show_step(step)
        role_map = (
            ('slave-01', 'controller'),
            ('slave-02', 'controller'),
            ('slave-03', 'controller'),
            ('slave-04', 'compute'),
            ('slave-05', 'compute'),
            ('slave-06', 'ceph-osd'),
            ('slave-07', 'ceph-osd'),
            ('slave-08', 'ceph-osd'),
        )
        self.fuel_web.update_nodes(
            cluster_id, {node: [role] for node, role in role_map})
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        self.show_step(9)
        self.show_step(10)
        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        # Repartition 'vdc' on every pending OSD node; keep the expected
        # image size for the post-deployment check.
        for osd_node in osd_nodes:
            image_size = self.fuel_web.update_node_partitioning(
                osd_node, node_role='ceph')

        self.show_step(11)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)

        for osd_node in osd_nodes:
            checkers.check_ceph_image_size(osd_node['ip'], image_size)

        self.show_step(13)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_for_images_ephemeral_rados")
예제 #15
0
    def tun_5_ctrl_ceph_ephemeral(self):
        """Deployment with 5 controllers, NeutronTUN, with Ceph RDB for
        ephemeral volumes

        Scenario:
            1. Create cluster using NeutronTUN provider, Ceph RDB for ephemeral
               volumes
            2. Add 5 nodes with controller role
            3. Add 1 nodes with compute role
            4. Add 3 nodes with ceph OSD role
            5. Change default partitioning for vdc of Ceph nodes
            6. Change public network mask from /24 to /25
            7. Verify networks
            8. Deploy the cluster
            9. Validate partition on Ceph node
            10. Verify networks
            11. Run OSTF

        Duration XXXm
        Snapshot tun_5_ctrl_ceph_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # Tunnelling segmentation; ephemeral storage on Ceph, no Cinder LVM.
        conf = {
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'tenant': 'TunCephEphemeral',
            'user': '******',
            'password': '******',
            'volumes_lvm': False,
            'ephemeral_ceph': True,
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings=conf,
        )
        # slaves 1-5 are controllers, 6-8 are OSDs, 9 is the only compute.
        assignments = {}
        for index in range(1, 6):
            assignments['slave-0{0}'.format(index)] = ['controller']
        for index in range(6, 9):
            assignments['slave-0{0}'.format(index)] = ['ceph-osd']
        assignments['slave-09'] = ['compute']
        self.fuel_web.update_nodes(cluster_id, assignments)

        pending_osds = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        devops_osds = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            pending_osds)
        # Change the 'vdc' partitioning and keep the expected image size.
        for osd in pending_osds:
            expected_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        # Narrow the public network from /24 to /25.
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for devops_node in devops_osds:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                # TODO: add pool size check
                checkers.check_ceph_image_size(remote, expected_size)

        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_5_ctrl_ceph_ephemeral")
예제 #16
0
    def deploy_reset_five_ceph_controllers(self):
        """Deployment with 5 controllers, NeutronVLAN, with Ceph for volumes,
           stop on deployment

        Scenario:
        1. Start deploy environment, 5 controller, 2 compute, 2 ceph nodes,
           Neutron VLAN
        2. Change default partitioning scheme for both ceph nodes for 'vdc'
        3. Stop process on controller deployment
        4. Change openstack username, password, tenant
        5. Deploy cluster
        6. Wait for HA services to be ready
        7. Wait for for OS services to be ready
        8. Verify networks
        9. Run OSTF tests

        Duration 120m
        Snapshot deploy_reset_five_ceph_controllers

        """

        self.env.revert_snapshot("ready_with_all_slaves")

        self.show_step(1)
        # Ceph backs both volumes and images; the replication factor is
        # lowered to "2" because only two ceph-osd nodes are deployed.
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE,
                                                  settings={
                                                      'volumes_lvm':
                                                      False,
                                                      'volumes_ceph':
                                                      True,
                                                      'images_ceph':
                                                      True,
                                                      'osd_pool_size':
                                                      "2",
                                                      "net_provider":
                                                      'neutron',
                                                      "net_segment_type":
                                                      NEUTRON_SEGMENT['vlan'],
                                                      'tenant':
                                                      'simpleVlan',
                                                      'user':
                                                      '******',
                                                      'password':
                                                      '******'
                                                  })
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['compute'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.show_step(2)
        # Repartition 'vdc' on both pending ceph-osd nodes before deployment.
        # NOTE(review): ceph_image_size keeps only the value from the last
        # loop iteration — assumes update_node_partitioning reports the same
        # size for every node; verify.
        ceph_nodes = self.fuel_web.\
            get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
                                               role_status='pending_roles')
        for ceph_node in ceph_nodes:
            ceph_image_size = self.fuel_web.\
                update_node_partitioning(ceph_node, node_role='ceph')

        # Start the deployment but only wait until it reaches ~5% progress,
        # so there is an in-flight deployment to interrupt in step 3.
        self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id,
                                                   progress=5)
        self.show_step(3)
        # Abort the run and wait for all nine slaves to come back online
        # before re-configuring the cluster.
        self.fuel_web.stop_deployment_wait(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:9], timeout=10 * 60)
        self.show_step(4)
        # While the cluster is stopped, rewrite the OpenStack access
        # credentials; the subsequent deploy must pick up the new values.
        attributes = self.fuel_web.client.get_cluster_attributes(cluster_id)
        access_attr = attributes['editable']['access']
        access_attr['user']['value'] = 'myNewUser'
        access_attr['password']['value'] = 'myNewPassword'
        access_attr['tenant']['value'] = 'myNewTenant'
        self.fuel_web.client.update_cluster_attributes(cluster_id, attributes)

        self.show_step(5)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(6)
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.show_step(7)
        self.fuel_web.assert_os_services_ready(cluster_id, timeout=10 * 60)
        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        # Confirm the step-2 repartitioning survived the full deployment.
        for ceph in ceph_nodes:
            checkers.check_ceph_image_size(ceph['ip'],
                                           expected_size=ceph_image_size)

        self.env.make_snapshot("deploy_reset_five_ceph_controllers")
예제 #17
0
    def tun_no_volumes_ceph_for_images_and_ephemeral(self):
        """Deployment with 3 controllers, NeutronVxLAN, with no storage for
        volumes and Ceph for images and ephemeral

        Scenario:
            1. Create cluster using NeutronTUN provider, external dns and ntp
               servers, no storage for volumes, Ceph for Images and ephemeral,
               Ceph replica factor 2
            2. Add 3 nodes with controller role
            3. Add 2 nodes with compute role
            4. Add 2 nodes with ceph OSD role
            5. Change default partitioning for vdc of Ceph node
            6. Change public network from /24 to /25
            7. Verify networks
            8. Deploy the cluster
            9. Validate partition on Ceph node
            10. Verify networks
            11. Run OSTF

        Duration 180m
        Snapshot tun_no_volumes_ceph_for_images_and_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # The scenario expects at least two external DNS and NTP servers;
        # warn (but keep going) when the configuration provides fewer.
        if len(settings.EXTERNAL_DNS.split(',')) < 2:
            logging.warning("Less than 2 DNS servers was configured!")

        if len(settings.EXTERNAL_NTP.split(',')) < 2:
            logging.warning("Less than 2 NTP servers was configured!")

        # No Cinder backends; Ceph keeps images and ephemeral volumes with a
        # replication factor of 2 (only two OSD nodes are deployed).
        conf = {
            'tenant': 'TunNoVolumesCeph',
            'user': '******',
            'password': '******',
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'dns_list': settings.EXTERNAL_DNS,
            'ntp_list': settings.EXTERNAL_NTP,
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'objects_ceph': False,
            'ephemeral_ceph': True,
            'osd_pool_size': '2'
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=conf)
        role_map = (
            ('slave-01', 'controller'),
            ('slave-02', 'controller'),
            ('slave-03', 'controller'),
            ('slave-04', 'compute'),
            ('slave-05', 'compute'),
            ('slave-06', 'ceph-osd'),
            ('slave-07', 'ceph-osd'),
        )
        self.fuel_web.update_nodes(
            cluster_id, {node: [role] for node, role in role_map})

        pending_osds = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        devops_osds = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            pending_osds)
        # Change the 'vdc' partitioning and keep the expected image size.
        for osd in pending_osds:
            expected_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        # Narrow the public network from /24 to /25.
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for devops_node in devops_osds:
            with self.fuel_web.get_ssh_for_node(devops_node.name) as remote:
                checkers.check_ceph_image_size(remote, expected_size)

        # The controllers must resolve through the external DNS and sync time
        # against the external NTP via the management vrouter VIP.
        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, roles=['controller'])
        vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id)
        for controller in controllers:
            with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
                checkers.external_dns_check(remote)
                checkers.external_ntp_check(remote, vrouter_vip)

        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_no_volumes_ceph_for_images_and_ephemeral")
예제 #18
0
    def cinder_ceph_for_images(self):
        """Deployment with 3 controllers, NeutronVLAN,
           with Ceph for images and other disk configuration

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose Ceph for images
            4. Add 3 controller
            5. Add 2 compute
            6. Add 1 cinder
            7. Add 3 ceph
            8. Change disk configuration for both Ceph nodes.
               Change 'Ceph' volume for vdc
            9. Verify networks
            10. Deploy the environment
            11. Verify networks
            12. Run OSTF tests

        Duration 180m
        Snapshot cinder_ceph_for_images
        """

        self.env.revert_snapshot("ready_with_9_slaves")

        # Cinder LVM keeps the volumes while Ceph stores the Glance images.
        cluster_settings = {
            'volumes_lvm': True,
            'volumes_ceph': False,
            'images_ceph': True,
            'tenant': 'cindercephforimages',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__, settings=cluster_settings)

        role_map = (
            ('slave-01', 'controller'),
            ('slave-02', 'controller'),
            ('slave-03', 'controller'),
            ('slave-04', 'compute'),
            ('slave-05', 'compute'),
            ('slave-06', 'cinder'),
            ('slave-07', 'ceph-osd'),
            ('slave-08', 'ceph-osd'),
            ('slave-09', 'ceph-osd'),
        )
        self.fuel_web.update_nodes(
            cluster_id, {node: [role] for node, role in role_map})
        self.fuel_web.verify_network(cluster_id)

        # Repartition 'vdc' on the pending OSD nodes and keep the expected
        # image size for the post-deployment validation.
        pending_osds = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd in pending_osds:
            expected_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for osd in pending_osds:
            checkers.check_ceph_image_size(osd['ip'], expected_size)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("cinder_ceph_for_images")
예제 #19
0
    def tun_5_ctrl_ceph_ephemeral(self):
        """Deployment with 5 controllers, NeutronTUN,
           with Ceph RBD for ephemeral volumes

        Scenario:
            1. Create new environment
            2. Choose Neutron, tunnelling segmentation
            3. Choose Ceph RBD for ephemeral volumes
               and uncheck Cinder LVM over iSCSI for volumes
            4. Add 5 controllers
            5. Add 1 compute
            6. Add 3 ceph
            7. Change default disks partitioning for ceph nodes for vdc
            8. Change public default mask from /24 to /25
            9. Verify networks
            10. Deploy the environment
            11. Verify networks
            12. Run OSTF tests

        Duration XXXm
        Snapshot tun_5_ctrl_ceph_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # Tunnelling segmentation; ephemeral volumes on Ceph, Cinder LVM off.
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'net_provider': settings.NEUTRON,
                'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
                'tenant': 'TunCephEphemeral',
                'user': '******',
                'password': '******',
                'volumes_lvm': False,
                'ephemeral_ceph': True,
            })

        controllers = ['slave-01', 'slave-02', 'slave-03', 'slave-04',
                       'slave-05']
        osds = ['slave-06', 'slave-07', 'slave-08']
        topology = dict(
            [(name, ['controller']) for name in controllers] +
            [(name, ['ceph-osd']) for name in osds] +
            [('slave-09', ['compute'])])
        self.fuel_web.update_nodes(cluster_id, topology)

        # Change the 'vdc' partitioning on every pending OSD node and keep
        # the expected image size for the post-deployment check.
        pending_osds = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        for osd in pending_osds:
            vdc_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        # Narrow the public network from /24 to /25.
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for osd in pending_osds:
            # TODO: add pool size check
            checkers.check_ceph_image_size(osd['ip'], vdc_size)

        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_5_ctrl_ceph_ephemeral")
예제 #20
0
    def ceph_for_images_ephemeral_rados(self):
        """Deploy cluster with ceph for images, ephemeral and rados

        Scenario:
            1. Create cluster
            2. Add 3 node with controller role
            3. Add 2 node with compute role
            4. Add 3 nodes with ceph OSD roles
            5. Change disks configuration for ceph nodes
            6. Change default dns and NTP values
            7. Verify networks
            8. Deploy the cluster
            9. Check ceph status
            10. Verify networks
            11. Check partitioning for ceph disks
            12. Run OSTF

        Duration 180m
        Snapshot ceph_for_images_ephemeral_rados
        """

        self.env.revert_snapshot("ready_with_9_slaves")

        # Ceph serves images, ephemeral volumes and (via RadosGW) objects;
        # both Cinder backends stay disabled.  External DNS/NTP replace the
        # defaults (covers scenario step 6).
        data = {
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'tenant': 'cephforimagesephemeralrados',
            'user': '******',
            'password': '******',
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': settings.EXTERNAL_DNS
        }
        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings=data
        )
        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['ceph-osd'],
                'slave-07': ['ceph-osd'],
                'slave-08': ['ceph-osd']
            }
        )
        # Steps 5-7 are reported out of numeric order on purpose: the network
        # check runs before the disks are repartitioned.
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(5)
        self.show_step(6)
        # Repartition 'vdc' on every pending OSD node; ceph_image_size keeps
        # the value from the last iteration for the later size validation.
        ceph_nodes = self.fuel_web.\
            get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
                                               role_status='pending_roles')
        d_ceph = self.fuel_web.get_devops_nodes_by_nailgun_nodes(ceph_nodes)
        for ceph_node in ceph_nodes:
            ceph_image_size = self.fuel_web.\
                update_node_partitioning(ceph_node, node_role='ceph')

        self.show_step(8)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(9)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(10)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(11)
        # Validate over SSH that the repartitioning was actually applied.
        for devops_ceph in d_ceph:
            with self.fuel_web.get_ssh_for_node(devops_ceph.name) as remote:
                checkers.check_ceph_image_size(remote, ceph_image_size)

        self.show_step(12)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_for_images_ephemeral_rados")
예제 #21
0
    def tun_no_volumes_ceph_for_images_and_ephemeral(self):
        """Deployment with 3 controllers, NeutronVxLAN,
           with no storage for volumes and ceph for images and ephemeral

        Scenario:
            1. Create new environment
            2. Choose Neutron, VxLAN
            3. Uncheck cinder for volumes and choose ceph for images,
               ceph for ephemeral
            4. Change ceph replication factor to 2
            5. Add 3 controller
            6. Add 2 compute
            7. Add 2 ceph nodes
            8. Change default disks partitioning for ceph nodes for 'vdc'
            9. Change default dns server to any 2 public dns servers
               to the 'Host OS DNS Servers' on Settings tab
            10. Change default ntp servers to any 2 public ntp servers
                to the 'Host OS NTP Servers' on Settings tab
            11. Change default public net mask from /24 to /25
            12. Verify networks
            13. Deploy cluster
            14. Verify networks
            15. Run OSTF

        Duration 180m
        Snapshot tun_no_volumes_ceph_for_images_and_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # Best-effort check only: the scenario asks for 2 external
        # DNS/NTP servers, but the test proceeds (with a warning) when
        # fewer are configured.
        if len(settings.EXTERNAL_DNS) < 2:
            logging.warning("Less than 2 DNS servers was configured!")

        if len(settings.EXTERNAL_NTP) < 2:
            logging.warning("Less than 2 NTP servers was configured!")

        self.show_step(1, initialize=True)
        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        data = {
            'tenant': 'TunNoVolumesCeph',
            'user': '******',
            'password': '******',
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'dns_list': settings.EXTERNAL_DNS,
            'ntp_list': settings.EXTERNAL_NTP,
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'objects_ceph': False,
            'ephemeral_ceph': True,
            'osd_pool_size': '2'
        }

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  settings=data)
        self.show_step(5)
        self.show_step(6)
        self.show_step(7)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['ceph-osd'],
                'slave-07': ['ceph-osd'],
            })

        self.show_step(8)
        ceph_nodes = self.fuel_web.\
            get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
                                               role_status='pending_roles')
        # Every ceph node is repartitioned identically, so the size
        # returned on the last iteration applies to all of them.
        for ceph_node in ceph_nodes:
            ceph_image_size = self.fuel_web.\
                update_node_partitioning(ceph_node, node_role='ceph')

        # Steps 9/10 (DNS and NTP) are applied through the
        # 'dns_list'/'ntp_list' cluster settings above.
        self.show_step(9)
        self.show_step(10)

        self.show_step(11)
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(13)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for ceph in ceph_nodes:
            checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)

        # External DNS/NTP must be reachable from every controller via the
        # management vrouter VIP.
        ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, roles=['controller'])
        vrouter_vip = self.fuel_web.get_management_vrouter_vip(cluster_id)
        for node in ctrls:
            checkers.external_dns_check(node['ip'])
            checkers.external_ntp_check(node['ip'], vrouter_vip)

        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(14)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(15)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_no_volumes_ceph_for_images_and_ephemeral")
예제 #22
0
    def deploy_5_contr_rados_delete(self):
        """Deployment with 5 controllers, NeutronVLAN,
           with Ceph for volumes and images, Rados GW for objects

        Scenario:
        1. Create environment 5 controller, 2 ceph Rados GW for objects,
           2 compute, Neutron VLAN.
        2. Change default disks partitioning for ceph nodes for 'vdc'
        3. Change default dns server to any 2 public dns servers to the
           'Host OS DNS Servers' on Settings tab
        4. Change default ntp servers to any 2 public ntp servers to the
           'Host OS NTP Servers' on Settings tab
        5. Verify networks
        6. Deploy cluster
        7. Verify networks
        8. Run OSTF
        9. Delete env

        Duration 180m
        Snapshot deploy_5_contr_rados_delete
        """

        self.env.revert_snapshot("ready_with_all_slaves")

        # Ceph backs images/ephemeral/objects; cinder LVM backs volumes.
        # DNS/NTP (steps 3-4) are passed in via the cluster settings.
        cluster_settings = {
            'volumes_lvm': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': "2",
            'tenant': 'deploy_5_contr_rados_delete',
            'user': '******',
            'password': '******',
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': settings.EXTERNAL_DNS
        }
        self.show_step(1)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings=cluster_settings)
        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        assignment = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['controller'],
            'slave-05': ['controller'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['compute'],
            'slave-09': ['compute']
        }
        self.fuel_web.update_nodes(cluster_id, assignment)

        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        # Both OSD nodes get the same partitioning, so the size reported
        # for the last one is valid for every node checked below.
        for osd in osd_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(6)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        for osd in osd_nodes:
            checkers.check_ceph_image_size(osd['ip'], ceph_image_size)

        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.show_step(9)
        self.fuel_web.delete_env_wait(cluster_id=cluster_id)
        self.env.make_snapshot("deploy_5_contr_rados_delete")
예제 #23
0
    def ceph_for_images(self):
        """Deployment with 3 controllers, NeutronVLAN,
           with no storage for volumes and ceph for images

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Uncheck cinder for volumes and choose ceph for images
            4. Add 3 controller
            5. Add 2 compute
            6. Add 3 ceph nodes
            7. Change default disks partitioning for ceph nodes for 'vdc'
            8. Change default dns server to any 2 public dns servers to the
               'Host OS DNS Servers' on Settings tab
            9. Change default ntp servers to any 2 public ntp servers to the
               'Host OS NTP Servers' on Settings tab
            10. Untag management and storage networks
                and move them to separate interfaces
            11. Verify networks
            12. Deploy cluster
            13. Verify networks
            14. Run OSTF

        Duration 180m
        Snapshot ceph_for_images
        """
        # NOTE(review): scenario step 10 (untag management/storage networks
        # and move them to separate interfaces) is not performed anywhere
        # below -- confirm whether the interface reconfiguration was dropped
        # on purpose or is missing from the implementation.

        self.env.revert_snapshot("ready_with_9_slaves")

        # No volume storage (cinder and ceph volumes both disabled),
        # ceph for images with replication factor 3; DNS/NTP for steps
        # 8-9 are passed in through the cluster settings.
        data = {
            'volumes_lvm': False,
            'volumes_ceph': False,
            'images_ceph': True,
            'osd_pool_size': "3",
            'tenant': 'cephforimages',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            'ntp_list': settings.EXTERNAL_NTP,
            'dns_list': settings.EXTERNAL_DNS
        }
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  settings=data)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['ceph-osd'],
                'slave-07': ['ceph-osd'],
                'slave-08': ['ceph-osd']
            })
        self.fuel_web.verify_network(cluster_id)

        ceph_nodes = self.fuel_web.\
            get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
                                               role_status='pending_roles')
        # Every ceph node is repartitioned identically, so the image size
        # returned on the last iteration applies to all nodes checked below.
        for ceph_node in ceph_nodes:
            ceph_image_size = self.fuel_web.\
                update_node_partitioning(ceph_node, node_role='ceph')

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # Confirm the deployed OSDs actually got the custom partition size.
        for ceph in ceph_nodes:
            checkers.check_ceph_image_size(ceph['ip'], ceph_image_size)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("ceph_for_images")
예제 #24
0
    def tun_5_ctrl_ceph_ephemeral(self):
        """Deployment with 5 controllers, NeutronTUN,
           with Ceph RBD for ephemeral volumes

        Scenario:
            1. Create new environment
            2. Choose Neutron, tunnelling segmentation
            3. Choose Ceph RBD for ephemeral volumes
               and uncheck Cinder LVM over iSCSI for volumes
            4. Add 5 controllers
            5. Add 1 compute
            6. Add 3 ceph
            7. Change default disks partitioning for ceph nodes for vdc
            8. Change public default mask from /24 to /25
            9. Verify networks
            10. Deploy the environment
            11. Verify networks
            12. Run OSTF tests

        Duration XXXm
        Snapshot tun_5_ctrl_ceph_ephemeral
        """
        self.env.revert_snapshot("ready_with_9_slaves")

        # Ceph RBD for ephemeral, cinder LVM disabled.
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'net_provider': settings.NEUTRON,
                'net_segment_type': settings.NEUTRON_SEGMENT['tun'],

                'tenant': 'TunCephEphemeral',
                'user': '******',
                'password': '******',

                'volumes_lvm': False,
                'ephemeral_ceph': True,
            })

        roles_by_slave = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['controller'],
            'slave-05': ['controller'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['compute'],
        }
        self.fuel_web.update_nodes(cluster_id, roles_by_slave)

        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        # Identical partitioning on every OSD node, so the value from the
        # last iteration is the expected size for all of them.
        for osd in osd_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        # Shrink the default public network from /24 to /25 (step 8).
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for osd in osd_nodes:
            # TODO: add pool size check
            checkers.check_ceph_image_size(osd['ip'], ceph_image_size)

        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("tun_5_ctrl_ceph_ephemeral")
예제 #25
0
    def cinder_ceph_for_images(self):
        """Deployment with 3 controllers, NeutronVLAN,
           with Ceph for images and other disk configuration

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose Ceph for images
            4. Add 3 controller
            5. Add 2 compute
            6. Add 1 cinder
            7. Add 3 ceph
            8. Change disk configuration for both Ceph nodes.
               Change 'Ceph' volume for vdc
            9. Verify networks
            10. Deploy the environment
            11. Verify networks
            12. Run OSTF tests

        Duration 180m
        Snapshot cinder_ceph_for_images
        """

        self.env.revert_snapshot("ready_with_9_slaves")

        # Cinder LVM for volumes, ceph only for the image storage.
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'volumes_lvm': True,
                'volumes_ceph': False,
                'images_ceph': True,
                'tenant': 'cindercephforimages',
                'user': '******',
                'password': '******',
                "net_provider": 'neutron',
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            })

        roles_by_slave = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['cinder'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['ceph-osd']
        }
        self.fuel_web.update_nodes(cluster_id, roles_by_slave)
        self.fuel_web.verify_network(cluster_id)

        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        # Each OSD node gets the same partition layout; the last returned
        # size is therefore the expected value for all of them.
        for osd in osd_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for osd in osd_nodes:
            checkers.check_ceph_image_size(osd['ip'], ceph_image_size)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("cinder_ceph_for_images")
    def deploy_reset_five_ceph_controllers(self):
        """Deployment with 5 controllers, NeutronVLAN, with Ceph for volumes,
           stop on deployment

        Scenario:
        1. Start deploy environment, 5 controller, 2 compute, 2 ceph nodes,
           Neutron VLAN
        2. Change default partitioning scheme for both ceph nodes for 'vdc'
        3. Stop process on controller deployment
        4. Change openstack username, password, tenant
        5. Deploy cluster
        6. Wait for HA services to be ready
        7. Wait for OS services to be ready
        8. Verify networks
        9. Run OSTF tests

        Duration 120m
        Snapshot deploy_reset_five_ceph_controllers

        """

        self.env.revert_snapshot("ready_with_all_slaves")

        self.show_step(1)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                'volumes_lvm': False,
                'volumes_ceph': True,
                'images_ceph': True,
                'osd_pool_size': "2",
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['vlan'],
                'tenant': 'simpleVlan',
                'user': '******',
                'password': '******'
            }
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['compute'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            }
        )

        self.show_step(2)
        ceph_nodes = self.fuel_web.\
            get_nailgun_cluster_nodes_by_roles(cluster_id, ['ceph-osd'],
                                               role_status='pending_roles')
        # Both ceph nodes get the same partitioning, so the size returned
        # for the last node applies to every node checked at the end.
        for ceph_node in ceph_nodes:
            ceph_image_size = self.fuel_web.\
                update_node_partitioning(ceph_node, node_role='ceph')

        # Let the deployment reach 5% progress, then interrupt it -- this
        # exercises the stop-on-deployment path before redeploying.
        self.fuel_web.deploy_cluster_wait_progress(cluster_id=cluster_id,
                                                   progress=5)
        self.show_step(3)
        self.fuel_web.stop_deployment_wait(cluster_id)
        # Stopped nodes reboot back to bootstrap; wait until all 9 slaves
        # report online again before touching the cluster.
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:9], timeout=10 * 60)
        self.show_step(4)
        # Change OpenStack access credentials while the cluster is stopped;
        # the subsequent (full) deployment must pick them up.
        attributes = self.fuel_web.client.get_cluster_attributes(cluster_id)
        access_attr = attributes['editable']['access']
        access_attr['user']['value'] = 'myNewUser'
        access_attr['password']['value'] = 'myNewPassword'
        access_attr['tenant']['value'] = 'myNewTenant'
        self.fuel_web.client.update_cluster_attributes(cluster_id, attributes)

        self.show_step(5)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.show_step(6)
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.show_step(7)
        self.fuel_web.assert_os_services_ready(cluster_id, timeout=10 * 60)
        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(9)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['ha', 'smoke', 'sanity'])

        # Verify the custom partitioning survived the interrupted + full
        # deployment cycle.
        for ceph in ceph_nodes:
            checkers.check_ceph_image_size(ceph['ip'],
                                           expected_size=ceph_image_size)

        self.env.make_snapshot("deploy_reset_five_ceph_controllers")
예제 #27
0
    def cinder_ceph_for_images_ephemeral_rados(self):
        """Deployment with 3 controllers, NeutronVLAN, with cinder for volumes
           and ceph for images, ephemeral and Rados GW for objects

        Scenario:
            1. Create new environment
            2. Choose Neutron, VLAN
            3. Choose cinder for volumes and ceph for images, ceph for
               ephemeral and Rados GW for objects
            4. Add 3 controller
            5. Add 2 compute
            6. Add 3 ceph nodes
            7. Add 1 cinder node
            8. Change default public net mask from /24 to /25
            9. Change default partitioning for ceph and cinder nodes for vdc
            10. Change default dns server to any 2 public dns servers to the
                'Host OS DNS Servers' on Settings tab
            11. Change default ntp servers to any 2 public ntp servers to the
                'Host OS NTP Servers' on Settings tab
            12. Verify networks
            13. Deploy cluster
            14. Verify networks
            15. Run OSTF

        Duration 180m
        Snapshot cinder_ceph_for_images_ephemeral_rados
        """

        self.env.revert_snapshot("ready_with_9_slaves")

        self.show_step(1, initialize=True)
        # Cinder LVM for volumes; ceph for images/ephemeral plus RadosGW.
        # DNS/NTP (steps 10-11) are passed in via the cluster settings.
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'volumes_lvm': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'objects_ceph': True,
                'tenant': 'cindercephforimagesephemeralrados',
                'user': '******',
                'password': '******',
                'ntp_list': settings.EXTERNAL_NTP,
                'dns_list': settings.EXTERNAL_DNS
            })
        for step in range(2, 8):
            self.show_step(step)
        roles_by_slave = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            'slave-03': ['controller'],
            'slave-04': ['compute'],
            'slave-05': ['compute'],
            'slave-06': ['ceph-osd'],
            'slave-07': ['ceph-osd'],
            'slave-08': ['ceph-osd'],
            'slave-09': ['cinder']
        }
        self.fuel_web.update_nodes(cluster_id, roles_by_slave)

        self.show_step(8)
        self.fuel_web.update_network_cidr(cluster_id, 'public')

        self.show_step(9)
        self.show_step(10)
        self.show_step(11)
        osd_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'], role_status='pending_roles')
        # Identical partitioning on every node of a role, so the size from
        # the last iteration is the expected value for the whole role.
        for osd in osd_nodes:
            ceph_image_size = self.fuel_web.update_node_partitioning(
                osd, node_role='ceph')

        lvm_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['cinder'], role_status='pending_roles')
        for lvm in lvm_nodes:
            cinder_image_size = self.fuel_web.update_node_partitioning(
                lvm, node_role='cinder')

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(13)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.check_ceph_status(cluster_id)
        self.show_step(14)
        self.fuel_web.verify_network(cluster_id)

        for osd in osd_nodes:
            checkers.check_ceph_image_size(osd['ip'], ceph_image_size)

        for lvm in lvm_nodes:
            checkers.check_cinder_image_size(lvm['ip'], cinder_image_size)

        self.show_step(15)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("cinder_ceph_for_images_ephemeral_rados")