Code example #1
    def rollback_automatically_ha_env(self):
        """Rollback manually simple deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Add raise exception to openstack.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                         "61i \ \ \ \ \ \ \ \ raise errors."
                                         "ExecutedErrorNonZeroExitCode('{0}')"
                                         .format('Some bad error'),
                                         '/var/upgrade/site-packages/'
                                         'fuel_upgrade/engines/'
                                         'openstack.py')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.nodes().slaves[0]), timeout=120)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:5])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)

        self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-06': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_ha")
Code example #2
File: fuel_actions.py Project: simudream/fuel-qa
    def upgrade_master_node(self, rollback=False, file_upload=True):
        """This method upgrades master node with current state."""

        with self.admin_remote as master:
            if file_upload:
                checkers.upload_tarball(master, hlp_data.TARBALL_PATH, '/var')
                checkers.check_file_exists(
                    master,
                    os.path.join('/var',
                                 os.path.basename(hlp_data.TARBALL_PATH)))
                checkers.untar(master, os.path.basename(hlp_data.TARBALL_PATH),
                               '/var')

            keystone_pass = hlp_data.KEYSTONE_CREDS['password']
            checkers.run_script(master,
                                '/var',
                                'upgrade.sh',
                                password=keystone_pass,
                                rollback=rollback,
                                exit_code=255 if rollback else 0)
            if not rollback:
                checkers.wait_upgrade_is_done(master,
                                              3000,
                                              phrase='*** UPGRADING MASTER NODE'
                                              ' DONE SUCCESSFULLY')
                checkers.check_upgraded_containers(master,
                                                   hlp_data.UPGRADE_FUEL_FROM,
                                                   hlp_data.UPGRADE_FUEL_TO)
            else:
                checkers.wait_rollback_is_done(master, 3000)
                checkers.check_upgraded_containers(master,
                                                   hlp_data.UPGRADE_FUEL_TO,
                                                   hlp_data.UPGRADE_FUEL_FROM)
            logger.debug("all containers are ok")
Code example #3
File: test_upgrade.py Project: amit213/fuel-main
    def rollback_automatically_simple_env(self):
        """Rollback automatically simple deployed cluster

        Scenario:
            1. Revert snapshot with simple neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded and run OSTF
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                         "98i \ \ \ \ \ \ \ \ raise errors."
                                         "ExecutedErrorNonZeroExitCode('{0}')"
                                         .format('Some bad error'),
                                         '/var/upgrade/site-packages/'
                                         'fuel_upgrade/engines/'
                                         'docker_engine.py')
        # We expect exit code 255 here: the injected error makes the
        # upgrade fail, and upgrade.sh exits with status 255.
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            should_fail=1,
            failed_test_name=['Create volume and attach it to instance'])
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_simple")
Code example #4
    def rollback_automatically_ha_env(self):
        """Rollback manually simple deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Add invalid release data to metadata.yaml file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                         "2i \ \ \ \ 2014.2-6.0: blah-blah",
                                         '/var/upgrade/releases/'
                                         'metadata.yaml')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.nodes().slaves[0]), timeout=120)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:5])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)

        self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-06': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_ha")
Code example #5
File: test_upgrade.py Project: astoica1986/fuel-main
    def upgrade_simple_env(self):
        """Upgrade simple deployed cluster with ceph

        Scenario:
            1. Revert snapshot with simple ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Add another compute node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
            user='******', tenant='ceph1', passwd='ceph1')
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=10, networks_count=1, timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

        self.env.make_snapshot("upgrade_simple")
Code example #6
    def upgrade_ha_one_controller_delete_node(self):
        """Upgrade ha 1 controller deployed cluster with ceph and
           delete node from old cluster

        Scenario:
            1. Revert ceph_ha_one_controller_compact snapshot
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Delete one compute+ceph node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_ha_one_controller_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_ha_one_controller_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
        wait(
            lambda: self.fuel_web.is_node_discovered(nodes[0]),
            timeout=10 * 60
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
        self.env.make_snapshot("upgrade_ha_one_controller_delete_node")
Code example #7
File: test_upgrade.py Project: igajsin/fuel-main
    def upgrade_simple_env(self):
        """Upgrade simple deployed cluster

        Scenario:
            1. Revert snapshot with simple cinder env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Add another compute node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_simple_cinder'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_simple_cinder")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.UPGRADE_TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(
                                          hlp_data.UPGRADE_TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.UPGRADE_TARBALL_PATH),
                       '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh')
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 1500,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.assert_cluster_ready(
            'slave-01', smiles_count=8, networks_count=1, timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

        self.env.make_snapshot("upgrade_simple")
Code example #8
File: test_upgrade.py Project: igajsin/fuel-main
    def rollback_simple_env(self):
        """Rollback manually simple deployed cluster

        Scenario:
            1. Revert snapshot with simple cinder env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Rollback cluster manually
            5. Check that rollback was successful
            6. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_simple_cinder'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_simple_cinder")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.UPGRADE_TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(
                                          hlp_data.UPGRADE_TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.UPGRADE_TARBALL_PATH),
                       '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh')
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 1500,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()

        self.fuel_web.manual_rollback(self.env.get_admin_remote(),
                                      hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_manual")
Code example #9
File: test_os_upgrade.py Project: simudream/fuel-qa
    def upgrade_ha_ceph_for_all_ubuntu_neutron_vlan(self):
        """Upgrade master node ha mode, ceph for all, neutron vlan

        Scenario:
            1. Revert snapshot with ha mode, ceph for all, neutron vlan env
            2. Run upgrade on master
            3. Check that upgrade was successful

        """
        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
            raise SkipTest()

        self.check_run('upgrade_ha_ceph_for_all_ubuntu_neutron_vlan')
        self.env.revert_snapshot("ha_ceph_for_all_ubuntu_neutron_vlan")

        cluster_id = self.fuel_web.get_last_created_cluster()

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(remote, hlp_data.TARBALL_PATH, '/var')
            checkers.check_file_exists(
                remote,
                os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
            checkers.untar(remote, os.path.basename(hlp_data.TARBALL_PATH),
                           '/var')
            checkers.run_script(remote,
                                '/var',
                                'upgrade.sh',
                                password=hlp_data.KEYSTONE_CREDS['password'])
            checkers.wait_upgrade_is_done(remote,
                                          3000,
                                          phrase='*** UPGRADING MASTER NODE'
                                          ' DONE SUCCESSFULLY')
            checkers.check_upgraded_containers(remote,
                                               hlp_data.UPGRADE_FUEL_FROM,
                                               hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:6])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()

        self.env.make_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan",
                               is_make=True)
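
Note the resource handling in this example: examples #1-#8 call
self.env.get_admin_remote() anew for every checker, while here a single remote
is acquired in a with block, shared by every checker, and closed on exit. The
contrast, reduced to a skeleton (illustrative; it assumes each
get_admin_remote() call may produce a separate SSH session):

# One remote per checker call (style of examples #1-#8):
checkers.upload_tarball(self.env.get_admin_remote(),
                        hlp_data.TARBALL_PATH, '/var')
checkers.untar(self.env.get_admin_remote(),
               os.path.basename(hlp_data.TARBALL_PATH), '/var')

# One shared remote, closed deterministically (style used above):
with self.env.d_env.get_admin_remote() as remote:
    checkers.upload_tarball(remote, hlp_data.TARBALL_PATH, '/var')
    checkers.untar(remote, os.path.basename(hlp_data.TARBALL_PATH), '/var')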
Code example #10
    def deploy_ha_after_upgrade(self):
        """Upgrade and deploy new ha cluster

        Scenario:
            1. Revert snapshot with simple ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Re-deploy cluster
            5. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(),
                                      3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            id for id in available_releases_after
            if id not in available_releases_before
        ]
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:9])
        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type
            },
            release_id=added_release[0])
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['controller'],
                'slave-07': ['compute'],
                'slave-08': ['compute'],
                'slave-09': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-04')
            self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("deploy_ha_after_upgrade")
Code example #11
    def rollback_automatically_simple_env(self):
        """Rollback automatically simple deployed cluster

        Scenario:
            1. Revert snapshot with simple neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded and run OSTF
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()
        remote = self.env.get_ssh_to_remote_by_name('slave-01')
        expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        # We expect exit code 255 here: the injected error makes the
        # upgrade fail, and upgrade.sh exits with status 255.
        checkers.run_script(self.env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True,
                            exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.nodes().slaves[0]),
              timeout=120)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(cluster_id, {'slave-04': ['cinder']}, True,
                                   False)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-04')
            kernel = UpgradeFuelMaster.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_simple")
Code example #12
File: test_upgrade.py Project: anbangr/fuel-qa
    def rollback_automatically_ha(self):
        """Rollback manually ha deployed cluster

        Scenario:
            1. Revert snapshot with Neutron GRE HA 6.1 env
            2. Add raise exception to openstack.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Add 1 cinder node and re-deploy cluster
            9. Run OSTF

        """
        #(ddmitriev)TODO: change the snapshot name to actual when reverting 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-06': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        self.env.make_snapshot("rollback_automatically_ha")
Code example #13
    def upgrade_ha_env(self):
        """Upgrade ha deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Check cluster is operable
            5. Create new simple Vlan cluster
            6. Deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(),
                                      3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:5])
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            id for id in available_releases_after
            if id not in available_releases_before
        ]
        self.env.bootstrap_nodes(self.env.nodes().slaves[5:7])
        data = {
            'tenant': 'novaSimpleVlan',
            'user': '******',
            'password': '******'
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE_SIMPLE,
            settings=data,
            release_id=added_release[0])
        self.fuel_web.update_nodes(cluster_id, {
            'slave-06': ['controller'],
            'slave-07': ['compute']
        })
        self.fuel_web.update_vlan_network_fixed(cluster_id,
                                                amount=8,
                                                network_size=32)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name('slave-06')['ip'],
            data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(os_conn,
                                           smiles_count=6,
                                           networks_count=8,
                                           timeout=300)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-06')
            self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("upgrade_ha")
Code example #14
File: test_upgrade.py Project: anbangr/fuel-qa
    def upgrade_fuel_after_rollback(self):
        """Upgrade Fuel after rollback and deploy new cluster

        Scenario:
            1. Revert deploy_neutron_gre snapshot with 6.1 env
            2. Upgrade with rollback
            3. Run OSTF
            4. Run network verification
            5. Upgrade fuel master
            6. Check upgrading was successful
            7. Deploy 6.1 cluster with 3 nodes and neutron vlan
            8. Run OSTF for new cluster
            9. Run network verification
        """
        #(ddmitriev)TODO: change the snapshot name to actual when reverting 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")

        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        remote = self.env.d_env.get_admin_remote

        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(remote(), hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(remote(), os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')

        # Upgrade with rollback
        keystone_pass = hlp_data.KEYSTONE_CREDS['password']
        checkers.run_script(remote(), '/var', 'upgrade.sh',
                            password=keystone_pass, rollback=True,
                            exit_code=255)
        checkers.wait_rollback_is_done(remote(), 3000)
        checkers.check_upgraded_containers(remote(), hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        # Upgrade fuel master
        checkers.run_script(remote(), '/var', 'upgrade.sh',
                            password=keystone_pass)
        checkers.wait_upgrade_is_done(remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nailgun_upgrade_migration()

        # Deploy new cluster
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [id for id in available_releases_after
                         if id not in available_releases_before]

        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:6])

        new_cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            release_id=added_release[0],
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': hlp_data.NEUTRON_SEGMENT['vlan']
            }
        )
        self.fuel_web.update_nodes(
            new_cluster_id, {
                'slave-04': ['controller'],
                'slave-05': ['compute'],
                'slave-06': ['cinder']
            }
        )
        self.fuel_web.run_network_verify(new_cluster_id)
        self.fuel_web.deploy_cluster_wait(new_cluster_id)
        self.fuel_web.run_ostf(new_cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.fuel_web.run_network_verify(new_cluster_id)

        self.env.make_snapshot("upgrade_fuel_after_rollback")
Code example #15
File: test_upgrade.py Project: anbangr/fuel-qa
    def upgrade_ha_one_controller(self):
        """Upgrade ha one controller deployed cluster with ceph

        Scenario:
            1. Revert snapshot with ha one controller ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Add another compute node
            7. Re-deploy cluster
            8. Run OSTF

        """
        if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):
            raise SkipTest()
        self.env.revert_snapshot('ceph_ha_one_controller_compact')

        cluster_id = self.fuel_web.get_last_created_cluster()

        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        expected_kernel = self.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        create_diagnostic_snapshot(
            self.env, "pass", "upgrade_ha_one_controller")

        self.env.make_snapshot("upgrade_ha_one_controller")
Code example #16
File: test_upgrade.py Project: anbangr/fuel-qa
    def upgrade_ha(self):
        """Upgrade ha deployed cluster

        Scenario:
            1. Revert snapshot with Neutron GRE HA 6.1 env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Create new cluster with 1 controller and neutron vlan
            7. Deploy cluster
            8. Run OSTF

        """
        #(ddmitriev)TODO: change the snapshot name to actual when reverting 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [id for id in available_releases_after
                         if id not in available_releases_before]
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:7])
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': 'vlan'
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-06')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            logger.debug("ubuntu kernel version"
                         " on new node is {}".format(kernel))
        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.env.make_snapshot("upgrade_ha")
Code example #17
File: test_os_patching.py Project: Zipfer/fuel-main
    def deploy_and_patch(self):
        """Update OS on reverted env

         Scenario:
            1. Revert environment
            2. Upload tarball
            3. Check that it uploaded
            4. Extract data
            5. Get available releases
            6. Run upgrade script
            7. Check that new release appears
            8. Put new release into cluster
            9. Run cluster update
            10. Get cluster net configuration
            11. Check that services are restarted
            12. Check that packages are updated
            13. Run OSTF
            14. Create snapshot

        """
        logger.info("snapshot name is {0}".format(self.snapshot))

        if not self.env.get_virtual_environment().has_snapshot(self.snapshot):
            logger.error('Snapshot {0} not found'.format(self.snapshot))
            raise SkipTest('Cannot find snapshot {0}'.format(self.snapshot))

        #  1. Revert  environment

        self.env.revert_snapshot(self.snapshot)

        logger.info("Start upload upgrade archive")
        node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)

        # 2. Upload tarball
        checkers.upload_tarball(
            node_ssh=node_ssh, tar_path=hlp_data.TARBALL_PATH,
            tar_target='/var/tmp')

        logger.info("Archive should upload. "
                    "Lets check that it exists on master node ...")
        #  3. Check that it uploaded
        checkers.check_tarball_exists(node_ssh, os.path.basename(
            hlp_data.TARBALL_PATH), '/var/tmp')

        logger.info("Extract archive to the /var/tmp")

        # 4. Extract data
        checkers.untar(node_ssh, os.path.basename(
            hlp_data.TARBALL_PATH), '/var/tmp')

        logger.info("Get release ids for deployed operation"
                    " system before upgrade.")

        # Get cluster nodes
        nailgun_nodes = [
            self.fuel_web.get_nailgun_node_by_devops_node(node)
            for node in self.env.nodes().slaves
            if self.fuel_web.get_nailgun_node_by_devops_node(node)]

        # Try to remember installed nova-packages before update
        p_version_before = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_before[node["devops_name"]] = res

        # 5. Get available releases
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('Time to run upgrade...')

        # 6. Run upgrade script

        checkers.run_script(node_ssh, '/var/tmp', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        logger.info('Check if the upgrade is complete.')

        checkers.wait_upgrade_is_done(node_ssh=node_ssh,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY',
                                      timeout=600 * 10)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('release ids list after upgrade is {0}'.format(
            available_releases_after))
        # 7. Check that new release appears
        assert_true(
            len(available_releases_after) > len(available_releases_before),
            "There is no new release, release ids before {0},"
            " release ids after {1}". format(
                available_releases_before, available_releases_after))

        release_version = hlp_data.RELEASE_VERSION
        logger.debug("Release version is {0}".format(release_version))

        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            res = utils.get_yaml_to_json(
                node_ssh,
                '/etc/puppet/{0}/manifests/ubuntu-versions.yaml'.format(
                    release_version))
            res_packages = json.loads(res[0])
            logger.debug('what we have in res_packages {0}'.format(
                res_packages))
        else:
            res = utils.get_yaml_to_json(
                node_ssh,
                '/etc/puppet/{0}/manifests/centos-versions.yaml'.format(
                    release_version))
            res_packages = json.loads(res[0])
            logger.debug('what we have in res_packages {0}'.format(
                res_packages))

        cluster_id = self.fuel_web.get_last_created_cluster()
        logger.debug("Cluster id is {0}".format(cluster_id))

        # 8. Put new release into cluster
        if release_version:
            added_release = self.fuel_web.get_releases_list_for_os(
                release_name=hlp_data.OPENSTACK_RELEASE,
                release_version=release_version)
            logger.debug("Does we have here release id ? {0}".format(
                release_version))
        else:
            added_release = [id for id in available_releases_after
                             if id not in available_releases_before]

        # get nova pids on controller before update
        ssh_to_controller = self.fuel_web.get_ssh_for_node(
            [n["devops_name"] for n in nailgun_nodes
             if 'controller' in n['roles']][0])

        nova_controller_services = ['nova-api', 'nova-cert',
                                    'nova-objectstore', 'nova-conductor',
                                    'nova-scheduler']

        nova_pids_before = utils.nova_service_get_pid(
            ssh_to_controller, nova_controller_services)

        logger.debug('Nova pids on controller before {0}'.format(
            nova_pids_before))

        # 9. Run cluster update
        self.fuel_web.update_cluster(
            cluster_id=cluster_id,
            data={
                'pending_release_id': added_release[0],
                'release_id': self.fuel_web.get_cluster_release_id(
                    cluster_id)})

        logger.info('All preparations for the update are done.'
                    ' It is time to update the cluster.')

        self.fuel_web.run_update(cluster_id=cluster_id,
                                 timeout=hlp_data.UPDATE_TIMEOUT, interval=20)

        # 10. Get cluster net configuration

        cluster_net = self.fuel_web.client.get_cluster(
            cluster_id)['net_provider']

        logger.debug('cluster net is {0}'.format(cluster_net))

        # 11. Check that services are restarted
        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            utils.check_if_service_restarted_ubuntu(
                ssh_to_controller, ["keystone'",
                                    "glance-registry'",
                                    "glance-api'",
                                    "heat-api-cfn'",
                                    "heat-engine'",
                                    "heat-api'",
                                    "heat-api-cloudwatch'"])
        else:
            utils.check_if_service_restarted_centos(
                ssh_to_controller, ["keystone",
                                    "glance-registry",
                                    "glance-api",
                                    "heat-api-cfn",
                                    "heat-engine",
                                    "heat-api",
                                    "heat-api-cloudwatch",
                                    "nova-novncproxy"])

        # get nova pids on controller after update
        nova_pids_after = utils.nova_service_get_pid(
            ssh_to_controller, nova_controller_services)

        logger.debug('Nova pids on controller after {0}'.format(
            nova_pids_after))

        assert_not_equal(nova_pids_before, nova_pids_after)

        # 12. Check that packages are updated

        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            for package in packages_fixture.dep:
                packages_fixture.dep[package] = res_packages[package]
            logger.debug("Current state of dict is {0}".format(
                packages_fixture.dep))
            for key in packages_fixture.dep:
                res = checkers.get_package_versions_from_node(
                    ssh_to_controller, name=key, os_type='Ubuntu')
                logger.debug('res_from_node is {0}'.format(res))
                assert_true(
                    packages_fixture.dep[key] in res,
                    "Wrong version of package {0}. "
                    "Should be {1} but get {2}".format(
                        key, packages_fixture.dep[key], res))
        else:
            for package in packages_fixture.rpm:
                packages_fixture.rpm[package] = res_packages[package]
            logger.debug("Current state of dict is {0}".format(
                packages_fixture.rpm))
            for key in packages_fixture.rpm:
                res = checkers.get_package_versions_from_node(
                    ssh_to_controller, name=key,
                    os_type=hlp_data.OPENSTACK_RELEASE)
                assert_true(
                    packages_fixture.rpm[key] in res,
                    "Wrong version of package {0}. "
                    "Should be {1} but get {2}".format(
                        key, packages_fixture.rpm[key], res))
        p_version_after = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote, name="nova",
                os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_after[node["devops_name"]] = res

        logger.info("packages before {0}".format(p_version_before))
        logger.info("packages after {0}".format(p_version_after))
        assert_true(p_version_before != p_version_after)

        # 13. Run OSTF
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        # 14. Create snapshot
        self.env.make_snapshot('{0}_and_patch'.format(self.snapshot))
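The test above verifies that nova services were restarted by comparing their PIDs before and after the update. A minimal sketch of what a helper like utils.nova_service_get_pid could look like; the pgrep-based approach and the remote.execute() contract (a dict with 'exit_code' and 'stdout') are assumptions for illustration, not the verified fuel-qa implementation:

def nova_service_get_pid(remote, services):
    """Return a {service_name: pid_list} mapping for the given services."""
    pids = {}
    for service in services:
        # pgrep -f matches against the full command line of each process
        result = remote.execute('pgrep -f {0}'.format(service))
        pids[service] = ''.join(result['stdout']).split()
    return pids

If every service was restarted during the update, the mapping collected afterwards differs from the one collected before, which is exactly what the assert_not_equal(nova_pids_before, nova_pids_after) check relies on.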
Code example #19
File: test_upgrade.py Project: aglarendil/fuel-qa
    def upgrade_ha_one_controller_env(self):
        """Upgrade ha one controller deployed cluster with ceph

        Scenario:
            1. Revert snapshot with ha one controller ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Add another compute node
            7. Re-deploy cluster
            8. Run OSTF

        """

        # For upgrade jobs *from* 6.1, change snapshot name to
        # "ceph_ha_one_controller_compact"
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()
        self.env.revert_snapshot("ceph_multinode_compact")

        cluster_id = self.fuel_web.get_last_created_cluster()

        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        expected_kernel = self.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            user='******', tenant='ceph1', passwd='ceph1')
        self.fuel_web.assert_cluster_ready(
            os_conn,
            smiles_count=7 if hlp_data.NEUTRON_ENABLE else 10,
            networks_count=2 if hlp_data.NEUTRON_ENABLE else 1,
            timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        create_diagnostic_snapshot(
            self.env, "pass", "upgrade_ha_one_controller_env")

        self.env.make_snapshot("upgrade_ha_one_controller")
Code example #20
    def upgrade_second_stage(self):
        """Upgrade master second time with 2 available clusters

        Scenario:
            1. Revert snapshot upgrade_first_stage
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification on both clusters
            5. Run OSTF on both clusters
            6. Add 1 compute node to both clusters and
               re-deploy them one by one
            7. Run network verification on both clusters
            8. Run OSTF on both clusters

        """
        if not self.env.revert_snapshot('upgrade_first_stage'):
            raise SkipTest()

        remote = self.env.d_env.get_admin_remote()
        remote.execute("rm -rf /var/*upgrade*")

        checkers.upload_tarball(remote,
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(
            remote,
            os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
        checkers.untar(remote,
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(remote,
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(remote, 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(remote,
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:6])
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[6:8])

        cluster_ids = [cluster['id']
                       for cluster in self.fuel_web.client.list_clusters()]
        for cluster_id in cluster_ids:
            self.fuel_web.verify_network(cluster_id)
            self.fuel_web.run_ostf(cluster_id=cluster_id)
        first_cluster_id = sorted(cluster_ids)[0]
        second_cluster_id = sorted(cluster_ids)[1]
        logger.debug("first cluster id {0}, second cluster id"
                     " {1}".format(first_cluster_id, second_cluster_id))

        self.fuel_web.update_nodes(
            first_cluster_id, {'slave-07': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(first_cluster_id)
        self.fuel_web.verify_network(first_cluster_id)
        self.fuel_web.run_ostf(cluster_id=first_cluster_id)

        self.fuel_web.update_nodes(
            second_cluster_id, {'slave-08': ['compute', 'ceph-osd']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(second_cluster_id)
        self.fuel_web.verify_network(second_cluster_id)
        self.fuel_web.run_ostf(cluster_id=second_cluster_id)
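wait_upgrade_is_done blocks until the success phrase shows up in the upgrade output. A plausible grep-polling sketch of such a helper; the log path and the 30-second poll interval are invented for illustration and are not the actual fuel-qa implementation:

import time


def wait_upgrade_is_done(remote, timeout, phrase,
                         log_path='/var/log/fuel_upgrade.log'):
    """Poll a log file over SSH until `phrase` appears or `timeout` expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        # grep -F takes the phrase literally, so '***' needs no escaping
        result = remote.execute("grep -F '{0}' {1}".format(phrase, log_path))
        if result['exit_code'] == 0:
            return
        time.sleep(30)
    raise Exception('Upgrade was not finished in {0} seconds'.format(timeout))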
Code example #21
File: test_upgrade.py Project: aglarendil/fuel-qa
    def deploy_ha_after_upgrade(self):
        """Upgrade and deploy new ha cluster

        Scenario:
            1. Revert snapshot with ha 1 controller ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Re-deploy cluster
            7. Run OSTF

        """

        # For upgrade jobs *from* 6.1, change snapshot name to
        # "ceph_ha_one_controller_compact"
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()
        self.env.revert_snapshot("ceph_multinode_compact")

        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            release_id for release_id in available_releases_after
            if release_id not in available_releases_before]
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:9])
        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['controller'],
                'slave-07': ['compute'],
                'slave-08': ['compute'],
                'slave-09': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            logger.debug("ubuntu kernel version"
                         " on new node is {}".format(kernel))
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("deploy_ha_after_upgrade")
Code example #22
File: test_upgrade.py Project: aglarendil/fuel-qa
    def upgrade_ha_one_controller_delete_node(self):
        """Upgrade ha 1 controller deployed cluster with ceph and
           delete node from old cluster

        Scenario:
            1. Revert ceph_ha_one_controller_compact snapshot
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Delete one compute+ceph node
            7. Re-deploy cluster
            8. Run OSTF

        """

        # For upgrade jobs *from* 6.1, change snapshot name to
        # "ceph_ha_one_controller_compact"
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()
        self.env.revert_snapshot("ceph_multinode_compact")

        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        remote_ceph = self.fuel_web.get_ssh_for_node('slave-03')
        self.fuel_web.prepare_ceph_to_delete(remote_ceph)
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = [node for node in nailgun_nodes
                 if node["pending_deletion"] is True]
        try:
            wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
                 timeout=5 * 60)
        except TimeoutError:
            assert_true(len(self.fuel_web.client.list_nodes()) == 3,
                        'Node {0} is not discovered in timeout 5 * 60'.format(
                            nodes[0]))
        self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
        self.env.make_snapshot("upgrade_ha_one_controller_delete_node")
Code example #23
File: test_upgrade.py Project: amit213/fuel-main
    def deploy_ha_after_upgrade(self):
        """Upgrade and deploy new ha cluster

        Scenario:
            1. Revert snapshot with simple ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Re-deploy cluster
            5. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh')
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            release_id for release_id in available_releases_after
            if release_id not in available_releases_before]
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:9])
        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['controller'],
                'slave-07': ['compute'],
                'slave-08': ['compute'],
                'slave-09': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("deploy_ha_after_upgrade")
Code example #24
    def upgrade_simple_env(self):
        """Upgrade simple deployed cluster with ceph

        Scenario:
            1. Revert snapshot with simple ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Add another compute node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        remote = self.env.get_ssh_to_remote_by_name('slave-01')
        expected_kernel = self.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(),
                                      3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(cluster_id, {'slave-04': ['compute']}, True,
                                   False)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
            user='******',
            tenant='ceph1',
            passwd='ceph1')
        self.fuel_web.assert_cluster_ready(os_conn,
                                           smiles_count=10,
                                           networks_count=1,
                                           timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-04')
            kernel = self.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

        self.env.make_snapshot("upgrade_simple")
Code example #25
File: test_upgrade.py Project: anbangr/fuel-qa
    def upgrade_ha_restart_containers(self):
        """Upgrade ha deployed cluster and restart containers

        Scenario:
            1. Revert snapshot with Neutron GRE HA 6.1 env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run patching and restart containers
            5. Run network verification
            6. Run OSTF
            7. Create new ha cluster with 1 controller and Neutron VLAN
            8. Deploy cluster
            9. Run OSTF

        """
        # (ddmitriev) TODO: change the snapshot name to the actual one
        # when reverting 7.0
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(self.env.d_env.get_admin_remote(),
                                   os.path.join('/var',
                                                os.path.basename(
                                                    hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')

        # Upgrade
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        remote = self.env.d_env.get_admin_remote()

        # Patching
        update_command = 'yum update -y'
        update_result = remote.execute(update_command)
        logger.debug('Result of "{1}" command on master node: '
                     '{0}'.format(update_result, update_command))
        assert_equal(int(update_result['exit_code']), 0,
                     'Packages update failed, '
                     'inspect logs for details')

        # Restart containers
        destroy_command = 'dockerctl destroy all'
        destroy_result = remote.execute(destroy_command)
        logger.debug('Result of "{1}" command on master node: '
                     '{0}'.format(destroy_result, destroy_command))
        assert_equal(int(destroy_result['exit_code']), 0,
                     'Destroy containers failed, '
                     'inspect logs for details')

        start_command = 'dockerctl start all'
        start_result = remote.execute(start_command)
        logger.debug('Result of "{1}" command on master node: '
                     '{0}'.format(start_result, start_command))
        assert_equal(int(start_result['exit_code']), 0,
                     'Start containers failed, '
                     'inspect logs for details')
        self.env.docker_actions.wait_for_ready_containers()
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        # Deploy new cluster
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            release_id for release_id in available_releases_after
            if release_id not in available_releases_before]

        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:7])

        new_cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            release_id=added_release[0],
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': 'vlan'
            }
        )
        self.fuel_web.update_nodes(
            new_cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )
        self.fuel_web.run_network_verify(new_cluster_id)
        self.fuel_web.deploy_cluster_wait(new_cluster_id)
        self.fuel_web.run_ostf(new_cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])
        self.fuel_web.run_network_verify(new_cluster_id)

        self.env.make_snapshot("upgrade_ha_restart_containers")
Code example #26
    def upgrade_first_stage(self):
        """Upgrade ha one controller deployed cluster and deploy new one

        Scenario:
            1. Revert snapshot with neutron ha one controller
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Deploy new ceph ha one controller neutron vlan cluster
            7. Run network verification
            8. Run OSTF

        """
        if not self.env.revert_snapshot('prepare_upgrade_env'):
            raise SkipTest()

        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(
            self.env.d_env.get_admin_remote(),
            os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            release_id for release_id in available_releases_after
            if release_id not in available_releases_before]
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:6])
        data = {
            'tenant': 'upgrade_first_stage',
            'user': '******',
            'password': '******',
            'net_provider': 'neutron',
            'net_segment_type': 'vlan',
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False
        }
        cluster_id = self.fuel_web.create_cluster(
            name='first_stage_upgrade',
            mode=hlp_data.DEPLOYMENT_MODE,
            settings=data,
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['compute', 'ceph-osd'],
                'slave-06': ['compute', 'ceph-osd']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("upgrade_first_stage", is_make=True)
Code example #27
File: test_upgrade.py Project: Zipfer/fuel-main
    def rollback_automatically_simple_env(self):
        """Rollback automatically simple deployed cluster

        Scenario:
            1. Revert snapshot with simple neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded and run OSTF
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()
        remote = self.env.get_ssh_to_remote_by_name('slave-01')
        expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                         "2i \ \ \ \ 2014.2-6.0: blah-blah",
                                         '/var/upgrade/releases/'
                                         'metadata.yaml')
        # we expect 255 exit code here because upgrade failed
        # and exit status is 255
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.nodes().slaves[0]), timeout=120)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-04')
            kernel = UpgradeFuelMaster.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_simple")
Code example #28
File: test_upgrade.py Project: aglarendil/fuel-qa
    def upgrade_ha_env(self):
        """Upgrade ha deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Create new ha cluster with 1 controller and VLAN networking
            7. Deploy cluster
            8. Run OSTF

        """
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            release_id for release_id in available_releases_after
            if release_id not in available_releases_before]
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:7])
        data = {
            'tenant': 'novaSimpleVlan',
            'user': '******',
            'password': '******'
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE,
            settings=data,
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )
        self.fuel_web.update_vlan_network_fixed(
            cluster_id, amount=8, network_size=32)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=8, timeout=300)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-06')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            logger.debug("ubuntu kernel version"
                         " on new node is {}".format(kernel))
        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("upgrade_ha")
Code example #29
File: test_upgrade.py Project: amit213/fuel-main
    def upgrade_ha_env(self):
        """Upgrade ha deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Check cluster is operable
            5. Create new simple Vlan cluster
            6. Deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh')
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [
            release_id for release_id in available_releases_after
            if release_id not in available_releases_before]
        self.env.bootstrap_nodes(self.env.nodes().slaves[5:7])
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE_SIMPLE,
            settings={
                'tenant': 'novaSimpleVlan',
                'user': '******',
                'password': '******'
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )
        self.fuel_web.update_vlan_network_fixed(
            cluster_id, amount=8, network_size=32)
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.assert_cluster_ready(
            'slave-06', smiles_count=6, networks_count=8, timeout=300)
        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("upgrade_ha")
Code example #30
File: test_upgrade.py Project: aglarendil/fuel-qa
    def rollback_automatically_ha_one_controller_env(self):
        """Rollback automatically ha one controller deployed cluster

        Scenario:
            1. Revert snapshot with deploy neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Add 1 ceph node and re-deploy cluster
            9. Run OSTF

        """
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()

        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        # we expect 255 exit code here because upgrade failed
        # and exit status is 255
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['ceph-osd']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = UpgradeFuelMaster.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_ha_one_controller")
Code example #31
File: test_os_patching.py Project: igajsin/fuel-main
    def deploy_and_patch(self):
        """Update os on reverted cluster

         Scenario:
            1. Revert  environment
            2. Upload tarball
            3. Check that it uploaded
            4. Extract data
            5. Get available releases
            6. Run update script
            7. Check that new release appears
            8. Put new release into cluster
            9. Run cluster update
            10. Run OSTF
            11. Create snapshot

        """

        if not self.env.get_virtual_environment().has_snapshot(self.snapshot):
            raise SkipTest()

        self.env.revert_snapshot(self.snapshot)

        logger.info("Start upload upgrade archive")
        node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)
        checkers.upload_tarball(
            node_ssh=node_ssh, tar_path=hlp_data.UPDATE_TARBALL_PATH,
            tar_target='/var/tmp')

        logger.info("Archive should upload. "
                    "Lets check that it exists on master node ...")

        checkers.check_tarball_exists(node_ssh, os.path.basename(
            hlp_data.UPDATE_TARBALL_PATH), '/var/tmp')

        logger.info("Extract archive to the /var/tmp")

        checkers.untar(node_ssh, os.path.basename(
            hlp_data.UPDATE_TARBALL_PATH), '/var/tmp')

        logger.info("Get release ids for deployed operation"
                    " system before upgrade..")

        # Get cluster nodes
        nailgun_nodes = [
            self.fuel_web.get_nailgun_node_by_devops_node(node)
            for node in self.env.nodes().slaves
            if self.fuel_web.get_nailgun_node_by_devops_node(node)]

        logger.info("Find next nodes {0}".format(nailgun_nodes))

        # Try to remember installed nova-packages before update
        p_version_before = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_before[node["devops_name"]] = res

        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('Available release ids before upgrade {0}'.format(
            available_releases_before))

        logger.info('Time to run upgrade...')

        checkers.run_script(node_ssh, '/var/tmp', 'upgrade.sh')
        logger.info('Check if the upgrade is complete.')

        checkers.wait_upgrade_is_done(node_ssh=node_ssh,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY',
                                      timeout=60 * 10)

        logger.info('Get release ids list after upgrade')
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('Release ids after upgrade: {0}'.format(
            available_releases_after))

        assert_true(
            len(available_releases_after) > len(available_releases_before),
            "There is no new release, release ids before {0},"
            " release ids after {1}".format(
                available_releases_before, available_releases_after))

        logger.debug("what we have here {0}".format(self.__class__))

        cluster_id = self.fuel_web.get_last_created_cluster()
        logger.debug("Cluster id is {0}".format(cluster_id))

        added_release = [
            release_id for release_id in available_releases_after
            if release_id not in available_releases_before]

        self.fuel_web.update_cluster(
            cluster_id=cluster_id,
            data={
                'pending_release_id': added_release[0],
                'release_id': self.fuel_web.get_cluster_release_id(
                    cluster_id)})

        logger.info('All preparations for the update are done.'
                    ' It is time to update the cluster...')

        self.fuel_web.run_update(
            cluster_id=cluster_id, timeout=15 * 60, interval=20)

        # Check packages after

        p_version_after = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_after[node["devops_name"]] = res

        logger.info("packages after {0}".format(p_version_after))
        logger.info("packages before {0}".format(p_version_before))

        # TODO(tleontovich): Add assert for packages when test repo is available

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot('{0}_and_patch'.format(self.snapshot),
                               is_make=True)
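get_package_versions_from_node is used throughout these tests to snapshot installed package versions before and after patching. One plausible shape for it, switching between dpkg and rpm on the os_type hint; the exact commands are assumptions, not the verified helper:

def get_package_versions_from_node(remote, name, os_type):
    """Return the raw package listing lines matching `name` on the node."""
    if os_type and 'Ubuntu' in os_type:
        cmd = "dpkg -l | grep '{0}'".format(name)
    else:
        cmd = "rpm -qa | grep '{0}'".format(name)
    # grep exits non-zero when nothing matches; an empty string is returned
    return ''.join(remote.execute(cmd)['stdout'])

Because the returned value is a plain string containing versions, the tests can simply assert that an expected version substring is `in` the result.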
Code example #32
File: test_upgrade.py Project: aglarendil/fuel-qa
    def rollback_automatically_delete_node(self):
        """Rollback automatically ha one controller deployed cluster
           and delete node from cluster

        Scenario:
            1. Revert snapshot with deploy neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Delete 1 node and re-deploy cluster
            9. Run OSTF

        """
        if not self.env.d_env.has_snapshot('deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH), '/var')
        # we expect 255 exit code here because upgrade failed
        # and exit status is 255
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'cinder']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = [node for node in nailgun_nodes
                 if node["pending_deletion"] is True]
        try:
            wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
                 timeout=5 * 60)
        except TimeoutError:
            assert_true(len(self.fuel_web.client.list_nodes()) == 3,
                        'Node {0} is not discovered in timeout 5 * 60'.format(
                            nodes[0]))
        self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)

        self.env.make_snapshot("rollback_automatically_delete_mode")
Code example #33
    def deploy_and_patch(self):
        """Update OS on reverted env

        Scenario:
            1. Revert environment
            2. Upload tarball
            3. Check that it was uploaded
            4. Extract data
            5. Get available releases
            6. Run upgrade script
            7. Check that new release appears
            8. Put new release into cluster
            9. Run cluster update
            10. Get cluster net configuration
            11. Check that services are restarted
            12. Check that packages are updated
            13. Run OSTF
            14. Create snapshot

        """
        logger.info("snapshot name is {0}".format(self.snapshot))

        if not self.env.get_virtual_environment().has_snapshot(self.snapshot):
            logger.error('Snapshot {0} was not found'.format(self.snapshot))
            raise SkipTest('Cannot find snapshot {0}'.format(self.snapshot))

        # 1. Revert environment

        self.env.revert_snapshot(self.snapshot)

        logger.info("Start upload upgrade archive")
        node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)

        # 2. Upload tarball
        checkers.upload_tarball(node_ssh=node_ssh,
                                tar_path=hlp_data.TARBALL_PATH,
                                tar_target='/var/tmp')

        logger.info("Archive should upload. "
                    "Lets check that it exists on master node ...")
        #  3. Check that it uploaded
        checkers.check_tarball_exists(node_ssh,
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var/tmp')

        logger.info("Extract archive to the /var/tmp")

        # 4. Extract data
        checkers.untar(node_ssh, os.path.basename(hlp_data.TARBALL_PATH),
                       '/var/tmp')

        logger.info("Get release ids for deployed operation"
                    " system before upgrade..")

        # Get cluster nodes (skip devops nodes without a nailgun counterpart)
        nailgun_nodes = [
            self.fuel_web.get_nailgun_node_by_devops_node(node)
            for node in self.env.nodes().slaves
            if self.fuel_web.get_nailgun_node_by_devops_node(node)
        ]

        # Remember the installed nova package versions before the update
        p_version_before = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_before[node["devops_name"]] = res

        # 5. Get available releases
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('Time to run upgrade...')

        # 6. Run upgrade script

        checkers.run_script(node_ssh,
                            '/var/tmp',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        logger.info('Check if the upgrade is complete...')

        checkers.wait_upgrade_is_done(node_ssh=node_ssh,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY',
                                      timeout=600 * 10)
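        # 600 * 10 = 6000 seconds, i.e. allow up to 100 minutes for the
        # master node upgrade to finish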

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('Release ids after upgrade: {0}'.format(
            available_releases_after))
        # 7. Check that new release appears
        assert_true(
            len(available_releases_after) > len(available_releases_before),
            "There is no new release, release ids before {0},"
            " release ids after {1}".format(available_releases_before,
                                            available_releases_after))

        versions_yaml = ('ubuntu-versions.yaml'
                         if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE
                         else 'centos-versions.yaml')
        res = utils.get_yaml_to_json(
            node_ssh,
            '/etc/puppet/{0}/manifests/{1}'.format(
                hlp_data.RELEASE_VERSION, versions_yaml))
        res_packages = json.loads(res[0])
        logger.debug(
            'Package versions from the manifest: {0}'.format(res_packages))
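        # For context, a minimal sketch of what a helper like
        # utils.get_yaml_to_json plausibly does (an assumption, not taken
        # from the fuel-qa source): read the YAML manifest on the remote
        # node and re-serialize it as JSON, which is why the test indexes
        # the result with res[0]:
        #
        #     import json
        #     import yaml  # PyYAML
        #
        #     def get_yaml_to_json(remote, path):
        #         raw = ''.join(
        #             remote.execute('cat {0}'.format(path))['stdout'])
        #         return [json.dumps(yaml.safe_load(raw))]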

        cluster_id = self.fuel_web.get_last_created_cluster()
        logger.debug("Cluster id is {0}".format(cluster_id))

        release_version = hlp_data.RELEASE_VERSION
        logger.debug("Release version is {0}".format(release_version))

        # 8. Put new release into cluster
        if release_version:
            added_release = self.fuel_web.get_releases_list_for_os(
                release_name=hlp_data.OPENSTACK_RELEASE,
                release_version=release_version)
            logger.debug(
                "Release id(s) for version {0}: {1}".format(
                    release_version, added_release))
        else:
            added_release = [
                release_id for release_id in available_releases_after
                if release_id not in available_releases_before
            ]

        # get nova pids on controller before update
        ssh_to_controller = self.fuel_web.get_ssh_for_node([
            n["devops_name"] for n in nailgun_nodes
            if 'controller' in n['roles']
        ][0])

        nova_controller_services = [
            'nova-api', 'nova-cert', 'nova-objectstore', 'nova-conductor',
            'nova-scheduler'
        ]

        nova_pids_before = utils.nova_service_get_pid(
            ssh_to_controller, nova_controller_services)

        logger.debug(
            'Nova pids on controller before {0}'.format(nova_pids_before))

        # 9. Run cluster update
        self.fuel_web.update_cluster(
            cluster_id=cluster_id,
            data={
                'pending_release_id': added_release[0],
                'release_id': self.fuel_web.get_cluster_release_id(cluster_id)
            })
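        # setting pending_release_id schedules the cluster for an update to
        # the new release, while release_id stays at the current one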

        logger.info('All preparations for the update are done.'
                    ' It is time to update the cluster ...')

        self.fuel_web.run_update(cluster_id=cluster_id,
                                 timeout=hlp_data.UPDATE_TIMEOUT,
                                 interval=20)

        # 10. Get cluster net configuration

        cluster_net = self.fuel_web.client.get_cluster(
            cluster_id)['net_provider']

        logger.debug('cluster net is {0}'.format(cluster_net))

        # 11. Check that services were restarted
        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            utils.check_if_service_restarted_ubuntu(ssh_to_controller, [
                "keystone'", "glance-registry'", "glance-api'",
                "heat-api-cfn'", "heat-engine'", "heat-api'",
                "heat-api-cloudwatch'"
            ])
        else:
            utils.check_if_service_restarted_centos(ssh_to_controller, [
                "keystone", "glance-registry", "glance-api", "heat-api-cfn",
                "heat-engine", "heat-api", "heat-api-cloudwatch",
                "nova-novncproxy"
            ])

        # get nova pids on controller after update
        nova_pids_after = utils.nova_service_get_pid(ssh_to_controller,
                                                     nova_controller_services)

        logger.debug(
            'Nova pids on controller after {0}'.format(nova_pids_after))

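        # if the update restarted the nova services, their PIDs must differ
        # from the ones captured before the update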
        assert_not_equal(nova_pids_before, nova_pids_after)

        # 12. Check that packages were updated

        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            for package in packages_fixture.dep:
                packages_fixture.dep[package] = res_packages[package]
                logger.debug("Current state of dict is {0}".format(
                    packages_fixture.dep))
            for key in packages_fixture.dep:
                res = checkers.get_package_versions_from_node(
                    ssh_to_controller, name=key, os_type='Ubuntu')
                logger.debug('res_from_node is {0}'.format(res))
                assert_true(
                    packages_fixture.dep[key] in res,
                    "Wrong version of package {0}. "
                    "Should be {1} but get {2}".format(
                        key, packages_fixture.dep[key], res))
        else:
            for package in packages_fixture.rpm:
                packages_fixture.rpm[package] = res_packages[package]
                logger.debug("Current state of dict is")
            for key in packages_fixture.rpm:
                res = checkers.get_package_versions_from_node(
                    ssh_to_controller,
                    name=key,
                    os_type=hlp_data.OPENSTACK_RELEASE)
                assert_true(
                    packages_fixture.rpm[key] in res,
                    "Wrong version of package {0}. "
                    "Should be {1} but get {2}".format(
                        key, packages_fixture.rpm[key], res))
        p_version_after = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote,
                name="openstack",
                os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_after[node["devops_name"]] = res

        logger.info("packages after {0}".format(p_version_after))
        logger.info("packages before {0}".format(p_version_before))

        assert_true(p_version_before != p_version_after,
                    'Package versions did not change after the update')

        # 13. Run OSTF
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        # 14. Create snapshot
        self.env.make_snapshot('{0}_and_patch'.format(self.snapshot))