Example #1
    def rollback_automatically_ha_env(self):
        """Rollback manually simple deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Add raise exception to openstack.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                         "61i \ \ \ \ \ \ \ \ raise errors."
                                         "ExecutedErrorNonZeroExitCode('{0}')"
                                         .format('Some bad error'),
                                         '/var/upgrade/site-packages/'
                                         'fuel_upgrade/engines/'
                                         'openstack.py')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                            password=
                            hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.nodes().slaves[0]), timeout=120)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:5])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)

        self.env.bootstrap_nodes(self.env.nodes().slaves[5:6])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-06': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_ha")
Example #2
    def _prepare_contrail_plugin(self, slaves=None, pub_net=False):
        """Copy necessary packages to the master node and install them"""

        self.env.revert_snapshot("ready_with_%d_slaves" % slaves)

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                CONTRAIL_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))

            # copy additional packages to the master node
            self._upload_contrail_packages(remote)

            # install packages
            self._install_packages(remote)

        # prepare fuel
        self._assign_net_provider(pub_net)
Example #3
    def upgrade_master_node(self, rollback=False, file_upload=True):
        """This method upgrades master node with current state."""
        # TODO: This will be removed or changed

        master = self.admin_remote
        if file_upload:
            checkers.upload_tarball(master, hlp_data.TARBALL_PATH, '/var')
            checkers.check_file_exists(master,
                                       os.path.join(
                                           '/var',
                                           os.path.basename(hlp_data.
                                                            TARBALL_PATH)))
            self.untar(master, os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')

        keystone_pass = hlp_data.KEYSTONE_CREDS['password']
        checkers.run_upgrade_script(master, '/var', 'upgrade.sh',
                                    password=keystone_pass,
                                    rollback=rollback,
                                    exit_code=255 if rollback else 0)
        if not rollback:
            checkers.wait_upgrade_is_done(master, 3000,
                                          phrase='***UPGRADING MASTER NODE'
                                                 ' DONE SUCCESSFULLY')
            checkers.check_upgraded_containers(master,
                                               hlp_data.UPGRADE_FUEL_FROM,
                                               hlp_data.UPGRADE_FUEL_TO)
        else:
            checkers.wait_rollback_is_done(master, 3000)
            checkers.check_upgraded_containers(master,
                                               hlp_data.UPGRADE_FUEL_TO,
                                               hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
Example #4
def prepare_contrail_plugin(obj, slaves=None, options=None):
    """Copy necessary packages to the master node and install them"""
    options = options or {}

    obj.env.revert_snapshot("ready_with_%d_slaves" % slaves)

    # copy plugin to the master node
    checkers.upload_tarball(
        obj.env.d_env.get_admin_remote(),
        CONTRAIL_PLUGIN_PATH, '/var')

    # install plugin
    checkers.install_plugin_check_code(
        obj.env.d_env.get_admin_remote(),
        plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))
    # FIXME: when opencontrail v3.0 is available,
    # remove 'or True' from the following line
    if obj.CONTRAIL_DISTRIBUTION == 'juniper' or True:
        # copy additional packages to the master node
        upload_contrail_packages(obj)

        # install packages
        install_packages(obj, obj.env.d_env.get_admin_remote())

    # prepare fuel
    openstack.assign_net_provider(obj, **options)
Example #5
    def rollback_automatically_simple_env(self):
        """Rollback automatically simple deployed cluster

        Scenario:
            1. Revert snapshot with simple neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded and run OSTF
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                         "98i \ \ \ \ \ \ \ \ raise errors."
                                         "ExecutedErrorNonZeroExitCode('{0}')"
                                         .format('Some bad error'),
                                         '/var/upgrade/site-packages/'
                                         'fuel_upgrade/engines/'
                                         'docker_engine.py')
        # we expect exit code 255 here because the upgrade failed
        # and its exit status is 255
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            should_fail=1,
            failed_test_name=['Create volume and attach it to instance'])
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_simple")
Example #6
    def deploy_neutron_lbaas_simple(self):
        """Deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Run OSTF

        Duration 35m
        Snapshot deploy_neutron_vlan_lbaas_simple

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(remote, LBAAS_PLUGIN_PATH, "/var")

            # install plugin

            checkers.install_plugin_check_code(remote, plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__, mode=DEPLOYMENT_MODE_SIMPLE)

        plugin_name = "lbaas"
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        logger.debug("we have lbaas element")
        options = {"metadata/enabled": True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {"slave-01": ["controller"], "slave-02": ["compute"], "slave-03": ["compute"]}
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster["net_provider"]), "neutron")

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name("slave-01")
        os_conn = os_actions.OpenStackActions(controller["ip"])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple")
Example #7
    def upload_plugin(self):
        """Upload plugin to master node"""
        # copy plugin to the master node
        assert_true(self.plugin_path, "plugin_path is not specified")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote,
                self.plugin_path, '/var')
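upload_tarball() is usually followed by install_plugin_check_code(), as in Examples #2 and #6. A minimal combined sketch; the helper name is hypothetical and the checkers import path is assumed from the fuel-qa layout:

import os

from fuelweb_test.helpers import checkers


def upload_and_install_plugin(environment, plugin_path):
    """Upload a plugin tarball to the master node and install it."""
    with environment.d_env.get_admin_remote() as remote:
        # copy plugin to the master node
        checkers.upload_tarball(remote, plugin_path, '/var')
        # install plugin and verify the installer exit code
        checkers.install_plugin_check_code(
            remote, plugin=os.path.basename(plugin_path))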
Example #8
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ZABBIX_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ha")
Example #9
    def upgrade_simple_env(self):
        """Upgrade simple deployed cluster with ceph

        Scenario:
            1. Revert snapshot with simple ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Add another compute node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var',
                            'upgrade.sh', password=
                            hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
            user='******', tenant='ceph1', passwd='ceph1')
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=10, networks_count=1, timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

        self.env.make_snapshot("upgrade_simple")
Example #10
def install_dvs_plugin(master_node):
    """Download and instal DVS plugin on master node."""
    # copy plugins to the master node
    checkers.upload_tarball(
        master_node,
        DVS_PLUGIN_PATH, "/var")

    # install plugin
    checkers.install_plugin_check_code(
        master_node,
        plugin=os.path.basename(DVS_PLUGIN_PATH))
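A usage sketch for install_dvs_plugin(), assuming master_node is the admin SSH remote obtained as in the other examples; the surrounding test method is hypothetical:

    def prepare_dvs_plugin(self):
        """Hypothetical test step that installs the DVS plugin."""
        with self.env.d_env.get_admin_remote() as remote:
            install_dvs_plugin(remote)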
Example #11
    def upgrade_ha_one_controller_delete_node(self):
        """Upgrade ha 1 controller deployed cluster with ceph and
           delete node from old cluster

        Scenario:
            1. Revert ceph_ha_one_controller_compact snapshot
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Delete one compute+ceph node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_ha_one_controller_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_ha_one_controller_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var',
                            'upgrade.sh', password=
                            hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
        wait(
            lambda: self.fuel_web.is_node_discovered(nodes[0]),
            timeout=10 * 60
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
        self.env.make_snapshot("upgrade_ha_one_controller_delete_node")
Example #12
    def upgrade_simple_env(self):
        """Upgrade simple deployed cluster

        Scenario:
            1. Revert snapshot with simple cinder env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Add another compute node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_simple_cinder'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_simple_cinder")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.UPGRADE_TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       UPGRADE_TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        UPGRADE_TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh')
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 1500,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.assert_cluster_ready(
            'slave-01', smiles_count=8, networks_count=1, timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

        self.env.make_snapshot("upgrade_simple")
Example #13
    def rollback_simple_env(self):
        """Rollback manually simple deployed cluster

        Scenario:
            1. Revert snapshot with simple cinder env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Rollback cluster manually
            5. Check that rollback was successful
            6. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_simple_cinder'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_simple_cinder")
        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.UPGRADE_TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       UPGRADE_TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        UPGRADE_TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh')
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 1500,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()

        self.fuel_web.manual_rollback(self.env.get_admin_remote(),
                                      hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_manual")
Example #14
    def upgrade_ha_ceph_for_all_ubuntu_neutron_vlan(self):
        """Upgrade master node ha mode, ceph for all, neutron vlan

        Scenario:
            1. Revert snapshot with ha mode, ceph for all, neutron vlan env
            2. Run upgrade on master
            3. Check that upgrade was successful

        """
        if hlp_data.OPENSTACK_RELEASE_UBUNTU not in hlp_data.OPENSTACK_RELEASE:
            raise SkipTest()

        self.check_run('upgrade_ha_ceph_for_all_ubuntu_neutron_vlan')
        self.env.revert_snapshot("ha_ceph_for_all_ubuntu_neutron_vlan")

        cluster_id = self.fuel_web.get_last_created_cluster()

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(remote,
                                    hlp_data.TARBALL_PATH, '/var')
            checkers.check_file_exists(remote,
                                       os.path.join('/var',
                                                    os.path.basename(
                                                        hlp_data.TARBALL_PATH))
                                       )
            checkers.untar(remote,
                           os.path.basename(hlp_data.
                                            TARBALL_PATH), '/var')
            checkers.run_script(remote,
                                '/var', 'upgrade.sh',
                                password=hlp_data.KEYSTONE_CREDS['password'])
            checkers.wait_upgrade_is_done(remote, 3000,
                                          phrase='*** UPGRADING MASTER NODE'
                                                 ' DONE SUCCESSFULLY')
            checkers.check_upgraded_containers(remote,
                                               hlp_data.UPGRADE_FUEL_FROM,
                                               hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:6])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()

        self.env.make_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan",
                               is_make=True)
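Example #14 combines check_run() with make_snapshot(..., is_make=True) so the upgraded master is built once and reused by later tests. A sketch of a dependent test reverting that snapshot; the method name is hypothetical, and all calls mirror the examples above:

    def deploy_on_upgraded_master(self):
        """Hypothetical follow-up test reusing the upgraded snapshot."""
        self.env.revert_snapshot("upgrade_ha_ceph_for_all_ubuntu_neutron_vlan")
        cluster_id = self.fuel_web.get_last_created_cluster()
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.run_ostf(cluster_id=cluster_id)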
Example #15
def prepare_test_plugin(
        obj, slaves=None, pub_all_nodes=False, ceph_value=False):
    """Copy necessary packages to the master node and install them"""

    obj.env.revert_snapshot("ready_with_%d_slaves" % slaves)

    # copy plugin to the master node
    checkers.upload_tarball(
        obj.env.d_env.get_admin_remote(),
        PLUGIN_PATH, '/var')

    # install plugin
    checkers.install_plugin_check_code(
        obj.env.d_env.get_admin_remote(),
        plugin=os.path.basename(PLUGIN_PATH))

    # prepare fuel
    openstack.assign_net_provider(obj, pub_all_nodes, ceph_value)
Example #16
    def _bootstrap(self):

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                conf.LMA_INFRA_ALERTING_PLUGIN_PATH, "/var")

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        plugin_options = {
            'send_to/value': 'root@localhost',
            'send_from/value': 'nagios@localhost',
            'smtp_host/value': '127.0.0.1',
            'nagios_password/value': self._nagios_password,
        }

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)
        logger.debug('%s (%s) plugin is installed' % (self._name,
                                                      self._version))
        self.fuel_web.update_plugin_settings(cluster_id,
                                             self._name,
                                             self._version,
                                             plugin_options)

        return cluster_id
Example #17
    def upgrade_ha_env(self):
        """Upgrade ha deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Create a new VLAN cluster with 1 controller and 1 compute
            7. Deploy cluster
            8. Run OSTF

        """
        if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:5])
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [id for id in available_releases_after
                         if id not in available_releases_before]
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[5:7])
        data = {
            'tenant': 'novaSimpleVlan',
            'user': '******',
            'password': '******'
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE,
            settings=data,
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )
        self.fuel_web.update_vlan_network_fixed(
            cluster_id, amount=8, network_size=32)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=8, timeout=300)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-06')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            logger.debug("ubuntu kernel version"
                         " on new node is {}".format(kernel))
        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("upgrade_ha")
Example #18
    def rollback_automatically_ha_one_controller_env(self):
        """Rollback automatically ha one controller deployed cluster

        Scenario:
            1. Revert snapshot with deploy neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Add 1 ceph node and re-deploy cluster
            9. Run OSTF

        """
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()

        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        # we expect 255 exit code here because upgrade failed
        # and exit status is 255
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['ceph-osd']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = UpgradeFuelMaster.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_ha_one_controller")
Example #19
    def deploy_neutron_example_ha_add_node(self):
        """Deploy and scale cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Add 2 nodes with controller role
            11. Deploy cluster
            12. Check plugin health
            13. Run OSTF

        Duration 150m
        Snapshot deploy_neutron_example_ha_add_node

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, EXAMPLE_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['tun'],
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-04', 'slave-05'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'

            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))

        # add verification here
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha_add_node")
Example #20
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            for plugin in [conf.ZABBIX_PLUGIN_PATH,
                           conf.ZABBIX_SNMP_PLUGIN_PATH]:
                checkers.upload_tarball(
                    remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote,
                    plugin=os.path.basename(plugin))

        settings = None

        if conf.NEUTRON_ENABLE:
            settings = {
                "net_provider": "neutron",
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
            settings=settings
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        cmd = "crm resource status p_zabbix-server"
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            response = remote.execute(cmd)["stdout"][0]
        assert_true("p_zabbix-server is running" in response,
                    "p_zabbix-server resource wasn't found in pacemaker:\n{0}"
                    .format(response))

        public_vip = self.fuel_web.get_public_vip(cluster_id)

        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        screens_html = bs4.BeautifulSoup(zabbix_web.get_screens())
        screens_links = screens_html.find_all('a')
        assert_true(any('charts.php?graphid=' in link.get('href')
                        for link in screens_links),
                    "Zabbix screen page does not contain graphs:\n{0}".
                    format(screens_links))

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0)

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
Example #21
    def deploy_influxdb_grafana_plugin(self):
        """Deploy a cluster with the InfluxDB-Grafana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with influxdb_grafana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_influxdb_grafana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node and install it
        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, INFLUXDB_GRAFANA_PLUGIN_PATH, '/var')
            checkers.install_plugin_check_code(
                remote, plugin=os.path.basename(INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        plugin_name = 'influxdb_grafana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_influxdb_grafana',
            'influxdb_rootpass/value': 'lmapass',
            'influxdb_userpass/value': 'lmapass',
            'grafana_userpass/value': 'lmapass',
        }

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            "Plugin couldn't be enabled. Check plugin version. Test aborted")

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['influxdb_grafana']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        influxdb_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        influxdb_server_ip = influxdb_server.get('ip')
        assert_is_not_none(influxdb_server_ip,
                           "Failed to get the IP of InfluxDB server")

        logger.debug("Check that InfluxDB is ready")

        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        r = requests.get(influxdb_url.format(
            influxdb_server_ip, 'lma', options['influxdb_userpass/value']))
        msg = "InfluxDB responded with {}, expected 200".format(r.status_code)
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the Grafana server is running")

        r = requests.get(
            "http://{0}:{1}@{2}:8000/api/org".format(
                'grafana', options['grafana_userpass/value'],
                influxdb_server_ip))
        msg = "Grafana server responded with {}, expected 200".format(
            r.status_code)
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_influxdb_grafana_plugin")
Example #22
    def deploy_and_patch(self):
        """Update OS on reverted env

         Scenario:
            1. Revert  environment
            2. Upload tarball
            3. Check that it uploaded
            4. Extract data
            5. Get available releases
            6. Run upgrade script
            7. Check that new release appears
            8. Put new release into cluster
            9. Run cluster update
            10. Get cluster net configuration
            11. Check that services are restarted
            12. Check that packages are updated
            13. Run OSTF
            14. Create snapshot

        """
        logger.info("snapshot name is {0}".format(self.snapshot))

        if not self.env.get_virtual_environment().has_snapshot(self.snapshot):
            logger.error('There is no snapshot found {0}'.format(self.snapshot))
            raise SkipTest('Can not find snapshot {0}'.format(self.snapshot))

        #  1. Revert  environment

        self.env.revert_snapshot(self.snapshot)

        logger.info("Start upload upgrade archive")
        node_ssh = self.env.get_ssh_to_remote(self.fuel_web.admin_node_ip)

        # 2. Upload tarball
        checkers.upload_tarball(node_ssh=node_ssh,
                                tar_path=hlp_data.TARBALL_PATH,
                                tar_target='/var/tmp')

        logger.info("Archive should upload. "
                    "Lets check that it exists on master node ...")
        #  3. Check that it uploaded
        checkers.check_tarball_exists(node_ssh,
                                      os.path.basename(hlp_data.TARBALL_PATH),
                                      '/var/tmp')

        logger.info("Extract archive to the /var/tmp")

        # 4. Extract data
        checkers.untar(node_ssh, os.path.basename(hlp_data.TARBALL_PATH),
                       '/var/tmp')

        logger.info("Get release ids for deployed operation"
                    " system before upgrade..")

        # Get cluster nodes
        nailgun_nodes = [
            self.fuel_web.get_nailgun_node_by_devops_node(node)
            for node in self.env.nodes().slaves
            if self.fuel_web.get_nailgun_node_by_devops_node(node)
        ]

        # Try to remember installed nova-packages before update
        p_version_before = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote, name="nova", os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_before[node["devops_name"]] = res

        # 5. Get available releases
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('Time to run upgrade...')

        # 6. Run upgrade script

        checkers.run_script(node_ssh,
                            '/var/tmp',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        logger.info('Check if the upgrade is complete...')

        checkers.wait_upgrade_is_done(node_ssh=node_ssh,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY',
                                      timeout=600 * 10)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)

        logger.info('release ids list after upgrade is {0}'.format(
            available_releases_after))
        # 7. Check that new release appears
        assert_true(
            len(available_releases_after) > len(available_releases_before),
            "There is no new release, release ids before {0},"
            " release ids after {1}".format(available_releases_before,
                                            available_releases_after))

        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            res = utils.get_yaml_to_json(
                node_ssh,
                '/etc/puppet/{0}/manifests/ubuntu-versions.yaml'.format(
                    hlp_data.RELEASE_VERSION))
            res_packages = json.loads(res[0])
            logger.debug(
                'what we have in res_packages {0}'.format(res_packages))
        else:
            res = utils.get_yaml_to_json(
                node_ssh,
                '/etc/puppet/{0}/manifests/centos-versions.yaml'.format(
                    hlp_data.RELEASE_VERSION))
            res_packages = json.loads(res[0])
            logger.debug(
                'what we have in res_packages {0}'.format(res_packages))

        cluster_id = self.fuel_web.get_last_created_cluster()
        logger.debug("Cluster id is {0}".format(cluster_id))

        release_version = hlp_data.RELEASE_VERSION
        logger.debug("Release version is {0}".format(release_version))

        # 8. Put new release into cluster
        if release_version:
            added_release = self.fuel_web.get_releases_list_for_os(
                release_name=hlp_data.OPENSTACK_RELEASE,
                release_version=release_version)
            logger.debug(
                "Does we have here release id ? {0}".format(release_version))
        else:
            added_release = [
                id for id in available_releases_after
                if id not in available_releases_before
            ]

        # get nova pids on controller before update
        ssh_to_controller = self.fuel_web.get_ssh_for_node([
            n["devops_name"] for n in nailgun_nodes
            if 'controller' in n['roles']
        ][0])

        nova_controller_services = [
            'nova-api', 'nova-cert', 'nova-objectstore', 'nova-conductor',
            'nova-scheduler'
        ]

        nova_pids_before = utils.nova_service_get_pid(
            ssh_to_controller, nova_controller_services)

        logger.debug(
            'Nova pids on controller before {0}'.format(nova_pids_before))

        # 9. Run cluster update
        self.fuel_web.update_cluster(
            cluster_id=cluster_id,
            data={
                'pending_release_id': added_release[0],
                'release_id': self.fuel_web.get_cluster_release_id(cluster_id)
            })

        logger.info('All preparations for the update are done.'
                    ' It is time to update the cluster ...')

        self.fuel_web.run_update(cluster_id=cluster_id,
                                 timeout=hlp_data.UPDATE_TIMEOUT,
                                 interval=20)

        # 10. Get cluster net configuration

        cluster_net = self.fuel_web.client.get_cluster(
            cluster_id)['net_provider']

        logger.debug('cluster net is {0}'.format(cluster_net))

        # 11. Check that services are restarted
        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            utils.check_if_service_restarted_ubuntu(ssh_to_controller, [
                "keystone'", "glance-registry'", "glance-api'",
                "heat-api-cfn'", "heat-engine'", "heat-api'",
                "heat-api-cloudwatch'"
            ])
        else:
            utils.check_if_service_restarted_centos(ssh_to_controller, [
                "keystone", "glance-registry", "glance-api", "heat-api-cfn",
                "heat-engine", "heat-api", "heat-api-cloudwatch",
                "nova-novncproxy"
            ])

        # get nova pids on controller after update
        nova_pids_after = utils.nova_service_get_pid(ssh_to_controller,
                                                     nova_controller_services)

        logger.debug(
            'Nova pids on controller after {0}'.format(nova_pids_after))

        assert_not_equal(nova_pids_before, nova_pids_after)

        # 12. Check that packages are updated

        if 'Ubuntu' in hlp_data.OPENSTACK_RELEASE:
            for package in packages_fixture.dep:
                packages_fixture.dep[package] = res_packages[package]
                logger.debug("Current state of dict is {0}".format(
                    packages_fixture.dep))
            for key in packages_fixture.dep:
                res = checkers.get_package_versions_from_node(
                    ssh_to_controller, name=key, os_type='Ubuntu')
                logger.debug('res_from_node is {0}'.format(res))
                assert_true(
                    packages_fixture.dep[key] in res,
                    "Wrong version of package {0}. "
                    "Should be {1} but get {2}".format(
                        key, packages_fixture.dep[key], res))
        else:
            for package in packages_fixture.rpm:
                packages_fixture.rpm[package] = res_packages[package]
                logger.debug("Current state of dict is")
            for key in packages_fixture.rpm:
                res = checkers.get_package_versions_from_node(
                    ssh_to_controller,
                    name=key,
                    os_type=hlp_data.OPENSTACK_RELEASE)
                assert_true(
                    packages_fixture.rpm[key] in res,
                    "Wrong version of package {0}. "
                    "Should be {1} but get {2}".format(
                        key, packages_fixture.rpm[key], res))
        p_version_after = {}
        for node in nailgun_nodes:
            remote = self.fuel_web.get_ssh_for_node(node["devops_name"])
            res = checkers.get_package_versions_from_node(
                remote=remote,
                name="openstack",
                os_type=hlp_data.OPENSTACK_RELEASE)
            p_version_after[node["devops_name"]] = res

        logger.info("packages after {0}".format(p_version_after))
        logger.info("packages before {0}".format(p_version_before))

        assert_true(p_version_before != p_version_after)

        # 13. Run OSTF
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        # 14. Create snapshot
        self.env.make_snapshot('{0}_and_patch'.format(self.snapshot))
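The update test above verifies that the Nova services were actually restarted by comparing their PIDs before and after the update (via `utils.nova_service_get_pid`). A minimal sketch of that pattern, assuming a remote object whose `execute()` returns a dict with an `stdout` list of lines, as the SSH helpers in these tests do; the helper name `get_service_pids` is illustrative, not part of the test suite:

def get_service_pids(remote, services):
    """Collect PIDs of the given services on a remote node via pgrep."""
    pids = {}
    for service in services:
        result = remote.execute("pgrep -f {0}".format(service))
        pids[service] = [line.strip() for line in result['stdout']]
    return pids

# Usage sketch: PIDs must differ after the update if the services restarted.
# pids_before = get_service_pids(remote, ['nova-api', 'nova-scheduler'])
# ... run the cluster update ...
# pids_after = get_service_pids(remote, ['nova-api', 'nova-scheduler'])
# assert pids_before != pids_after, "Nova services were not restarted"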
Example #23
0
    def rollback_automatically_delete_node(self):
        """Rollback automatically ha one controller deployed cluster
           and delete node from cluster

        Scenario:
            1. Revert snapshot with deploy neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded
            6. Run network verification
            7. Run OSTF
            8. Delete 1 node and re-deploy cluster
            9. Run OSTF

        """
        if not self.env.d_env.has_snapshot('deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        # we expect 255 exit code here because upgrade failed
        # and exit status is 255
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var',
                            'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.d_env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.d_env.nodes().slaves[0]), timeout=8 * 60)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'cinder']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
        try:
            wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
                 timeout=5 * 60)
        except TimeoutError:
            assert_true(len(self.fuel_web.client.list_nodes()) == 3,
                        'Node {0} is not discovered in timeout 5 * 60'.format(
                            nodes[0]))
        self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)

        self.env.make_snapshot("rollback_automatically_delete_mode")
Example #24
0
    def deploy_neutron_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_neutron_example_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, EXAMPLE_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha")
Example #25
0
    def rollback_automatically_simple_env(self):
        """Rollback automatically simple deployed cluster

        Scenario:
            1. Revert snapshot with simple neutron gre env
            2. Add raise exception to docker_engine.py file
            3. Run upgrade on master
            4. Check that rollback starts automatically
            5. Check that cluster was not upgraded and run OSTF
            6. Add 1 cinder node and re-deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre")
        cluster_id = self.fuel_web.get_last_created_cluster()
        remote = self.env.get_ssh_to_remote_by_name('slave-01')
        expected_kernel = UpgradeFuelMaster.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        self.fuel_web.modify_python_file(self.env.get_admin_remote(),
                                         "2i \ \ \ \ 2014.2-6.0: blah-blah",
                                         '/var/upgrade/releases/'
                                         'metadata.yaml')
        # we expect 255 exit code here because upgrade failed
        # and exit status is 255
        checkers.run_script(self.env.get_admin_remote(), '/var', 'upgrade.sh',
                            password=
                            hlp_data.KEYSTONE_CREDS['password'],
                            rollback=True, exit_code=255)
        checkers.wait_rollback_is_done(self.env.get_admin_remote(), 3000)
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
        logger.debug("all containers are ok")
        _wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
            self.env.nodes().slaves[0]), timeout=120)
        logger.debug("all services are up now")
        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:3])
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_FROM)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['cinder']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-04')
            kernel = UpgradeFuelMaster.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("rollback_automatic_simple")
Example #26
0
    def deploy_ha_after_upgrade(self):
        """Upgrade and deploy new ha cluster

        Scenario:
            1. Revert snapshot with simple ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Re-deploy cluster
            5. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var',
                            'upgrade.sh', password=
                            hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [id for id in available_releases_after
                         if id not in available_releases_before]
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:9])
        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['controller'],
                'slave-07': ['compute'],
                'slave-08': ['compute'],
                'slave-09': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-04')
            self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("deploy_ha_after_upgrade")
Example #27
0
    def deploy_zabbix_ceph_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller, ceph-osd roles
            5. Add 2 nodes with compute, ceph-osd roles
            6. Deploy the cluster
            7. Run network verification
            8. Run OSTF
            9. Check zabbix service in pacemaker
            10. Check login to zabbix dashboard

        Duration 180m
        Snapshot deploy_zabbix_ceph_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(remote,
                                               plugin=os.path.basename(
                                                   conf.ZABBIX_PLUGIN_PATH))

        settings = {}
        if conf.NEUTRON_ENABLE:
            settings = {
                "net_provider": "neutron",
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE
            }

        settings.update({
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            'tenant': 'cephHA',
            'user': '******',
            'password': '******',
            'osd_pool_size': "3"
        })
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=conf.DEPLOYMENT_MODE,
                                                  settings=settings)

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
            })

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ceph_ha")
Example #28
0
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            for plugin in [
                    conf.ZABBIX_PLUGIN_PATH, conf.ZABBIX_SNMP_PLUGIN_PATH,
                    conf.ZABBIX_SNMP_EXTREME_PLUGIN_PATH
            ]:
                checkers.upload_tarball(remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote, plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(zabbix_web, 'extreme',
                                              'Power Supply Failed'))

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
Example #29
0
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            for plugin in [
                    conf.ZABBIX_PLUGIN_PATH, conf.ZABBIX_SNMP_PLUGIN_PATH
            ]:
                checkers.upload_tarball(remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote, plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0)

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
Example #30
0
    def separate_all_service(self):
        """Deploy cluster with 3 nodes with db, keystone, rabbit, horizon

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with database, keystone, rabbit,
               horizon
            4. Add 1 node with compute role and 1 node with cinder role
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_all_service
        """
        self.check_run("separate_all_service")
        self.env.revert_snapshot("ready_with_9_slaves")

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugins to the master node

            checkers.upload_tarball(remote,
                                    settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
                                    "/var")

            checkers.upload_tarball(
                remote, settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH, "/var")

            checkers.upload_tarball(
                remote, settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH, "/var")

            # install plugins

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(
                    settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(
                    settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH))

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(
                    settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH))

        data = {
            'tenant': 'separateall',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_names = [
            'detach-database', 'detach-keystone', 'detach-rabbitmq'
        ]
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        for plugin_name in plugin_names:
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            options = {'metadata/enabled': True}
            self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': [
                    'standalone-database', 'standalone-rabbitmq',
                    'standalone-keystone'
                ],
                'slave-05': [
                    'standalone-database', 'standalone-keystone',
                    'standalone-rabbitmq'
                ],
                'slave-06': [
                    'standalone-database', 'standalone-keystone',
                    'standalone-rabbitmq'
                ],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_all_service", is_make=True)
Example #31
0
    def separate_db_service(self):
        """Deploy cluster with 3 separate database roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with database role
            4. Add 1 node with compute role and 1 node with cinder role
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_db_service
        """
        self.check_run("separate_db_service")
        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        checkers.upload_tarball(
            self.env.d_env.get_admin_remote(),
            settings.SEPARATE_SERVICE_DB_PLUGIN_PATH, "/var")

        # install plugins

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        data = {
            'tenant': 'separatedb',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-database'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database'],
                'slave-05': ['standalone-database'],
                'slave-06': ['standalone-database'],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_db_service", is_make=True)
Example #32
0
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote,
                EXAMPLE_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = NEUTRON_SEGMENT['vlan']
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Example #33
0
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.d_env.get_admin_remote(),
            EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
        res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
        assert_equal(0, res_pgrep['exit_code'],
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        assert_equal(1, len(res_pgrep['stdout']),
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        # curl to service
        _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
        res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
        assert_equal(0, res_curl['exit_code'],
                     'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Example #34
0
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as admin_remote:
            # copy plugin to the master node
            checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
            checkers.upload_tarball(
                admin_remote,
                EXAMPLE_PLUGIN_V3_PATH,
                '/var')
            # install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-01') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if fuel_plugin_example_v3_puppet called
            # between netconfig and connectivity_tests
            netconfig_str = 'MODULAR: netconfig.pp'
            plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
            connect_str = 'MODULAR: connectivity_tests.pp'
            checkers.check_log_lines_order(remote,
                                           log_file_path='/var/log/puppet.log',
                                           line_matcher=[netconfig_str,
                                                         plugin_str,
                                                         connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        with self.env.fuel_web.get_ssh_for_node('slave-02') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-03') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if service run on slave-03
            logger.debug("Checking service on node {0}".format('slave-03'))

            cmd = 'pgrep -f fuel-simple-service'
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd, res_pgrep['stderr']))
            process_count = len(res_pgrep['stdout'])
            assert_equal(1, process_count,
                         "There should be 1 process 'fuel-simple-service',"
                         " but {0} found {1} processes".format(cmd,
                                                               process_count))

            # curl to service
            cmd_curl = 'curl localhost:8234'
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd_curl, res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3")
Example #35
0
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ZABBIX_PLUGIN_PATH))

        settings = None
        if conf.NEUTRON_ENABLE:
            settings = {
                "net_provider": "neutron",
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
            settings=settings
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        cmd = "crm resource status p_zabbix-server"
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            response = remote.execute(cmd)["stdout"][0]
        assert_true("p_zabbix-server is running" in response,
                    "p_zabbix-server resource wasn't found in pacemaker:\n{0}"
                    .format(response))

        public_vip = self.fuel_web.get_public_vip(cluster_id)

        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        screens_html = bs4.BeautifulSoup(zabbix_web.get_screens())
        screens_links = screens_html.find_all('a')
        assert_true(any('charts.php?graphid=' in link.get('href')
                        for link in screens_links),
                    "Zabbix screen page does not contain graphs:\n{0}".
                    format(screens_links))

        self.env.make_snapshot("deploy_zabbix_ha")
Example #36
0
    def deploy_ha_one_controller_glusterfs_simple(self):
        """Deploy cluster with one controller and glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_glusterfs
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote, GLUSTER_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'external_glusterfs'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True,
                   'endpoint/value': GLUSTER_CLUSTER_ENDPOINT}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-03'):
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                self.check_glusterfs_conf(
                    remote=remote,
                    path='/etc/cinder/glusterfs',
                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
Example #37
0
    def upgrade_ha_env(self):
        """Upgrade ha deployed cluster

        Scenario:
            1. Revert snapshot with neutron gre ha env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Check cluster is operable
            5. Create new simple Vlan cluster
            6. Deploy cluster
            7. Run OSTF

        """
        if not self.env.get_virtual_environment().has_snapshot(
                'deploy_neutron_gre_ha'):
            raise SkipTest()

        self.env.revert_snapshot("deploy_neutron_gre_ha")
        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var',
                            'upgrade.sh', password=
                            hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [id for id in available_releases_after
                         if id not in available_releases_before]
        self.env.bootstrap_nodes(self.env.nodes().slaves[5:7])
        data = {
            'tenant': 'novaSimpleVlan',
            'user': '******',
            'password': '******'
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE_SIMPLE,
            settings=data,
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-06': ['controller'],
                'slave-07': ['compute']
            }
        )
        self.fuel_web.update_vlan_network_fixed(
            cluster_id, amount=8, network_size=32)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name('slave-06')['ip'],
            data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=6, networks_count=8, timeout=300)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-06')
            self.check_upgraded_kernel(self.env.get_admin_remote(), remote)
        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("upgrade_ha")
Example #38
0
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as admin_remote:
            # copy plugin to the master node
            checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
            checkers.upload_tarball(
                admin_remote,
                EXAMPLE_PLUGIN_V3_PATH,
                '/var')
            # install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-01') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if fuel_plugin_example_v3_puppet called
            # between netconfig and connectivity_tests
            netconfig_str = 'MODULAR: netconfig.pp'
            plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
            connect_str = 'MODULAR: connectivity_tests.pp'
            checkers.check_log_lines_order(remote,
                                           log_file_path='/var/log/puppet.log',
                                           line_matcher=[netconfig_str,
                                                         plugin_str,
                                                         connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        with self.env.fuel_web.get_ssh_for_node('slave-02') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-03') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if service run on slave-03
            logger.debug("Checking service on node {0}".format('slave-03'))

            cmd = 'pgrep -f fuel-simple-service'
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd, res_pgrep['stderr']))
            process_count = len(res_pgrep['stdout'])
            assert_equal(1, process_count,
                         "There should be 1 process 'fuel-simple-service',"
                         " but {0} found {1} processes".format(cmd,
                                                               process_count))

            # curl to service
            cmd_curl = 'curl localhost:8234'
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd_curl, res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3")
Пример #39
0
    def deploy_neutron_lbaas_simple(self):
        """Deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Run OSTF

        Snapshot deploy_neutron_vlan_lbaas_simple

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'lbaas' in attr['editable']:
            logger.debug('we have lbaas element')
            plugin_data = attr['editable']['lbaas']['metadata']
            plugin_data['enabled'] = True

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster['net_provider']), 'neutron')

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(controller['ip'])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbass_work(os_conn)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple")
Пример #40
0
    def deploy_lma_toolchain(self):
        """Deploy cluster in HA mode with the LMA toolchain

        This also deploys the Elasticsearch-Kibana plugin and the
        InfluxDB-Grafana plugin since they work together with the LMA collector
        plugin.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute + cinder role
            6. Add 1 node with influxdb_grafana + elasticsearch_kibana +
               infrastructure_alerting roles
            7. Deploy the cluster
            8. Check that the plugins work
            9. Run OSTF

        Duration 150m
        Snapshot deploy_lma_toolchain

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # TODO(scroiset): use the upload_plugin and install_plugin actions
        # from fuel_actions.py
        with self.env.d_env.get_admin_remote() as remote:
            # copy plugins to the master node
            checkers.upload_tarball(
                remote,
                conf.LMA_COLLECTOR_PLUGIN_PATH, "/var")
            checkers.upload_tarball(
                remote,
                conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH, "/var")
            checkers.upload_tarball(
                remote,
                conf.INFLUXDB_GRAFANA_PLUGIN_PATH, "/var")
            checkers.upload_tarball(
                remote,
                conf.LMA_INFRA_ALERTING_PLUGIN_PATH, "/var")

            # install plugins
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_COLLECTOR_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.INFLUXDB_GRAFANA_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        influxdb_user = "******"
        influxdb_pass = "******"
        influxdb_rootpass = "******"
        grafana_user = "******"
        grafana_pass = "******"
        mysql_dbname = "grafanalma"
        mysql_user = "******"
        mysql_pass = "******"
        nagios_pass = "******"
        plugins = [
            {
                'name': 'lma_collector',
                'version': '0.9.0',
                'options': {
                    'environment_label/value': 'deploy_lma_toolchain',
                    'elasticsearch_mode/value': 'local',
                    'influxdb_mode/value': 'local',
                    'alerting_mode/value': 'local',
                }
            },
            {
                'name': 'elasticsearch_kibana',
                'version': '0.9.0',
                'options': {
                }
            },
            {
                'name': 'lma_infrastructure_alerting',
                'version': '0.9.0',
                'options': {
                    'send_to/value': 'root@localhost',
                    'send_from/value': 'nagios@localhost',
                    'smtp_host/value': '127.0.0.1',
                    'nagios_password/value': nagios_pass,
                }
            },
            {
                'name': 'influxdb_grafana',
                'version': '0.9.0',
                'options': {
                    'influxdb_rootpass/value': influxdb_rootpass,
                    'influxdb_username/value': influxdb_user,
                    'influxdb_userpass/value': influxdb_pass,
                    'grafana_username/value': grafana_user,
                    'grafana_userpass/value': grafana_pass,
                    'mysql_mode/value': 'local',
                    'mysql_dbname/value': mysql_dbname,
                    'mysql_username/value': mysql_user,
                    'mysql_password/value': mysql_pass,
                }
            },
        ]
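        # For each plugin: make sure it is registered for this cluster, then
        # push the option values defined above.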
        for plugin in plugins:
            plugin_name = plugin['name']
            plugin_version = plugin['version']
            msg = "Plugin '%s' couldn't be found. Test aborted" % plugin_name
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('%s plugin is installed' % plugin_name)
            self.fuel_web.update_plugin_settings(
                cluster_id, plugin_name,
                plugin_version, plugin['options'])

        analytics_roles = ["influxdb_grafana",
                           "elasticsearch_kibana",
                           "infrastructure_alerting"]
        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute", "cinder"],
                "slave-05": analytics_roles,
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=9000)

        analytics_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, analytics_roles
        )
        msg = "One node with '{}' roles must be present, found {}".format(
            ' + '.join(analytics_roles), len(analytics_nodes))

        assert_true(len(analytics_nodes) == 1, msg)

        elasticsearch_kibana_vip = self.get_vip(cluster_id, 'es_vip_mgmt')
        influxdb_grafana_vip = self.get_vip(cluster_id, 'influxdb')
        nagios_vip = self.get_vip(cluster_id, 'infrastructure_alerting')
        assert_is_not_none(
            elasticsearch_kibana_vip,
            "Fail to retrieve the Elasticsearch/Kibana cluster VIP address"
        )
        assert_is_not_none(
            influxdb_grafana_vip,
            "Fail to retrieve the InfluxDB/Grafana cluster VIP address"
        )
        assert_is_not_none(
            nagios_vip,
            "Fail to retrieve the Infrastructure Alerting cluster VIP address"
        )

        def assert_http_get_response(url, expected=200):
            r = requests.get(url)
            assert_equal(r.status_code, expected,
                         "{} responded with {}, expected {}".format(
                             url, r.status_code, expected))
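        # The checks below hit each plugin endpoint through its VIP:
        # Elasticsearch (9200), Kibana (80), InfluxDB (8086), Grafana (8000)
        # and the Nagios web UI (8001, behind HTTP basic auth).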

        logger.debug("Check that Elasticsearch is ready")
        assert_http_get_response("http://{0}:9200/".format(
            elasticsearch_kibana_vip))

        logger.debug("Check that Kibana is ready")
        assert_http_get_response("http://{0}/".format(
            elasticsearch_kibana_vip))

        logger.debug("Check that the root user can access InfluxDB")
        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,
                                                     'root',
                                                     influxdb_rootpass))
        logger.debug("Check that the LMA user can access InfluxDB")
        assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,
                                                     influxdb_user,
                                                     influxdb_pass))

        logger.debug("Check that the LMA user can access Grafana")
        assert_http_get_response(
            "http://{0}:{1}@{2}:8000/api/org".format(grafana_user,
                                                     grafana_pass,
                                                     influxdb_grafana_vip))

        nagios_url = "http://{}:{}".format(nagios_vip, '8001')
        r = requests.get(nagios_url, auth=('nagiosadmin',
                                           nagios_pass))
        assert_equal(
            r.status_code, 200,
            "Nagios HTTP response code {}, expected {}".format(
                r.status_code, 200)
        )
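        # A sketch of how the helper above could also cover the Nagios check
        # by accepting optional basic-auth credentials (an assumption, not
        # part of the original test):
        #
        # def assert_http_get_response(url, expected=200, auth=None):
        #     r = requests.get(url, auth=auth)
        #     assert_equal(r.status_code, expected,
        #                  "{} responded with {}, expected {}".format(
        #                      url, r.status_code, expected))
        #
        # assert_http_get_response(nagios_url,
        #                          auth=('nagiosadmin', nagios_pass))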
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_lma_toolchain")
Пример #41
0
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with base-os role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                ELASTICSEARCH_KIBANA_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        plugin_name = 'elasticsearch_kibana'
        options = {'metadata/enabled': True,
                   'node_name/value': 'slave-03_base-os'}
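        # 'node_name/value' points the plugin at the node that will host
        # Elasticsearch/Kibana: slave-03, which gets the base-os role below.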
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['base-os']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        es_server_ip = es_server.get('ip')
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticseach is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
Пример #42
0
    def upgrade_ha_one_controller_delete_node(self):
        """Upgrade ha 1 controller deployed cluster with ceph and
           delete node from old cluster

        Scenario:
            1. Revert ceph_ha_one_controller_compact snapshot
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Delete one compute+ceph node
            7. Re-deploy cluster
            8. Run OSTF

        """

        # For upgrade jobs *from* 6.1, change snapshot name to
        # "ceph_ha_one_controller_compact"
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()
        self.env.revert_snapshot("ceph_multinode_compact")

        cluster_id = self.fuel_web.get_last_created_cluster()
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        remote_ceph = self.fuel_web.get_ssh_for_node('slave-03')
        self.fuel_web.prepare_ceph_to_delete(remote_ceph)
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-03': ['compute', 'ceph-osd']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = filter(lambda x: x["pending_deletion"] is True, nailgun_nodes)
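        # After deletion the node is expected to re-register as an
        # unallocated bootstrap node, so the total node count returns to 3.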
        try:
            wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
                 timeout=5 * 60)
        except TimeoutError:
            assert_true(len(self.fuel_web.client.list_nodes()) == 3,
                        'Node {0} was not discovered within '
                        '5 * 60 seconds'.format(nodes[0]))
        self.fuel_web.run_ostf(cluster_id=cluster_id, should_fail=1)
        self.env.make_snapshot("upgrade_ha_one_controller_delete_node")
Пример #43
0
    def deploy_neutron_lbaas_simple_reset_ready(self):
        """Deploy and re-deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Reset cluster
            11. Add 1 compute
            12. Re-deploy cluster
            13. Check health of lbaas agent on the node
            14. Create pool and vip
            15. Run OSTF

        Duration 65m
        Snapshot deploy_neutron_lbaas_simple_reset_ready

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(remote, LBAAS_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote, plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
        )

        plugin_name = 'lbaas'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        logger.debug('we have lbaas element')
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller'],
            'slave-02': ['compute'],
        })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster['net_provider']), 'neutron')

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(controller['ip'])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)

        self.fuel_web.stop_reset_env_wait(cluster_id)

        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:2])

        self.fuel_web.update_nodes(cluster_id, {
            'slave-03': ['compute'],
        })

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_lbaas_simple_reset_ready")
Пример #44
0
    def deploy_ha_after_upgrade(self):
        """Upgrade and deploy new ha cluster

        Scenario:
            1. Revert snapshot with ha 1 controller ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Re-deploy cluster
            7. Run OSTF

        """

        # For upgrade jobs *from* 6.1, change snapshot name to
        # "ceph_ha_one_controller_compact"
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()
        self.env.revert_snapshot("ceph_multinode_compact")

        cluster_id = self.fuel_web.get_last_created_cluster()
        available_releases_before = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.TARBALL_PATH),
                       '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        available_releases_after = self.fuel_web.get_releases_list_for_os(
            release_name=hlp_data.OPENSTACK_RELEASE)
        added_release = [id for id in available_releases_after
                         if id not in available_releases_before]
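        # The master node upgrade registers an additional release; the newly
        # added release id is used below to create the second cluster.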
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:9])
        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=hlp_data.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type
            },
            release_id=added_release[0]
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
                'slave-06': ['controller'],
                'slave-07': ['compute'],
                'slave-08': ['compute'],
                'slave-09': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        assert_equal(str(cluster['net_provider']), 'neutron')
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            logger.debug("ubuntu kernel version"
                         " on new node is {}".format(kernel))
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)
        self.env.make_snapshot("deploy_ha_after_upgrade")
Пример #45
0
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(remote, CONF.EMC_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(remote,
                                               plugin=os.path.basename(
                                                   CONF.EMC_PLUGIN_PATH))

        settings = None

        if CONF.NEUTRON_ENABLE:
            settings = {
                "net_provider": 'neutron',
                "net_segment_type": CONF.NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=CONF.DEPLOYMENT_MODE,
                                                  settings=settings)

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in [
                "emc_sp_a_ip", "emc_sp_b_ip", "emc_username", "emc_password",
                "emc_pool_name"
        ]:
            asserts.assert_true(
                option in attr["editable"]["emc_vnx"],
                "{0} is not in cluster attributes: {1}".format(
                    option, str(attr["editable"]["storage"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = CONF.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = CONF.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = CONF.EMC_USERNAME
        emc_options["emc_password"]["value"] = CONF.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = CONF.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-01', 'slave-02', 'slave-03']
        ]
        compute_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-04', 'slave-05']
        ]

        controller_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in controller_nodes
        ]
        compute_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in compute_nodes
        ]

        # check cinder-volume settings

        for remote in controller_remotes:
            self.check_emc_cinder_config(remote=remote,
                                         path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(remote=remote)

        # check cinder-volume layout on controllers
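        # check_service() is assumed to return 1 when the service is running
        # on the node and 0 otherwise, so the sum below gives the number of
        # active cinder-volume instances across the controllers.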

        cinder_volume_ctrls = [
            self.check_service(controller, "cinder-volume")
            for controller in controller_remotes
        ]
        asserts.assert_equal(
            sum(cinder_volume_ctrls), 1, "Cluster has more than one "
            "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [
            self.check_service(compute, "cinder-volume")
            for compute in compute_remotes
        ]
        # closing connections
        [remote.clear() for remote in controller_remotes]
        [remote.clear() for remote in compute_remotes]

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
Пример #46
0
    def upgrade_ha_one_controller_env(self):
        """Upgrade ha one controller deployed cluster with ceph

        Scenario:
            1. Revert snapshot with ha one controller ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Run network verification
            5. Run OSTF
            6. Add another compute node
            7. Re-deploy cluster
            8. Run OSTF

        """

        # For upgrade jobs *from* 6.1, change snapshot name to
        # "ceph_ha_one_controller_compact"
        if not self.env.d_env.has_snapshot('ceph_multinode_compact'):
            raise SkipTest()
        self.env.revert_snapshot("ceph_multinode_compact")

        cluster_id = self.fuel_web.get_last_created_cluster()

        _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
        remote = self.env.d_env.get_ssh_to_remote(_ip)
        expected_kernel = self.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.d_env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.d_env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        checkers.run_script(self.env.d_env.get_admin_remote(),
                            '/var', 'upgrade.sh',
                            password=hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.d_env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.d_env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:3])
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.bootstrap_nodes(
            self.env.d_env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            user='******', tenant='ceph1', passwd='ceph1')
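        # The expected numbers of healthy services ("smiles") and networks
        # differ between Neutron and nova-network deployments, hence the
        # conditionals below.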
        self.fuel_web.assert_cluster_ready(
            os_conn,
            smiles_count=7 if hlp_data.NEUTRON_ENABLE else 10,
            networks_count=2 if hlp_data.NEUTRON_ENABLE else 1,
            timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']
            remote = self.env.d_env.get_ssh_to_remote(_ip)
            kernel = self.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        create_diagnostic_snapshot(
            self.env, "pass", "upgrade_ha_one_controller_env")

        self.env.make_snapshot("upgrade_ha_one_controller")
Пример #47
0
    def upgrade_simple_env(self):
        """Upgrade simple deployed cluster with ceph

        Scenario:
            1. Revert snapshot with simple ceph env
            2. Run upgrade on master
            3. Check that upgrade was successful
            4. Add another compute node
            5. Re-deploy cluster
            6. Run OSTF

        """

        if not self.env.get_virtual_environment().has_snapshot(
                'ceph_multinode_compact'):
            raise SkipTest()

        self.env.revert_snapshot("ceph_multinode_compact")
        cluster_id = self.fuel_web.get_last_created_cluster()
        remote = self.env.get_ssh_to_remote_by_name('slave-01')
        expected_kernel = self.get_slave_kernel(remote)

        checkers.upload_tarball(self.env.get_admin_remote(),
                                hlp_data.TARBALL_PATH, '/var')
        checkers.check_tarball_exists(self.env.get_admin_remote(),
                                      os.path.basename(hlp_data.
                                                       TARBALL_PATH),
                                      '/var')
        checkers.untar(self.env.get_admin_remote(),
                       os.path.basename(hlp_data.
                                        TARBALL_PATH), '/var')
        checkers.run_script(self.env.get_admin_remote(), '/var',
                            'upgrade.sh', password=
                            hlp_data.KEYSTONE_CREDS['password'])
        checkers.wait_upgrade_is_done(self.env.get_admin_remote(), 3000,
                                      phrase='*** UPGRADE DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(self.env.get_admin_remote(),
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nodes_in_ready_state(cluster_id)
        self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
        self.fuel_web.assert_nailgun_upgrade_migration()
        self.env.bootstrap_nodes(self.env.nodes().slaves[3:4])
        self.fuel_web.update_nodes(
            cluster_id, {'slave-04': ['compute']},
            True, False
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_nailgun_node_by_name('slave-01')['ip'],
            user='******', tenant='ceph1', passwd='ceph1')
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=10, networks_count=1, timeout=300)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:
            remote = self.env.get_ssh_to_remote_by_name('slave-04')
            kernel = self.get_slave_kernel(remote)
            checkers.check_kernel(kernel, expected_kernel)
        create_diagnostic_snapshot(self.env, "pass", "upgrade_simple_env")

        self.env.make_snapshot("upgrade_simple")
Пример #48
0
    def deploy_nova_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_nova_example_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(self.env.get_admin_remote(),
                                EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'fuel_plugin_example' in attr['editable']:
            plugin_data = attr['editable']['fuel_plugin_example']['metadata']
            plugin_data['enabled'] = True

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            res_pgrep = self.env.get_ssh_to_remote_by_name(node).execute(cmd)
            assert_equal(
                0, res_pgrep['exit_code'], 'Failed with error {0} '
                'on node {1}'.format(res_pgrep['stderr'], node))
            assert_equal(
                1, len(res_pgrep['stdout']), 'Failed with error {0} on the '
                'node {1}'.format(res_pgrep['stderr'], node))
            # curl to service
            res_curl = self.env.get_ssh_to_remote_by_name(node).execute(
                cmd_curl)
            assert_equal(
                0, res_curl['exit_code'], 'Failed with error {0} '
                'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_nova_example_ha")
Пример #49
0
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(self.env.get_admin_remote(),
                                EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE,
                                                  settings={
                                                      "net_provider":
                                                      'neutron',
                                                      "net_segment_type":
                                                      segment_type,
                                                  })

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'fuel_plugin_example' in attr['editable']:
            plugin_data = attr['editable']['fuel_plugin_example']['metadata']
            plugin_data['enabled'] = True

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check that the service is running on the controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.env.get_ssh_to_remote_by_name('slave-01').execute(cmd)
        assert_equal(0, res_pgrep['exit_code'],
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        assert_equal(1, len(res_pgrep['stdout']),
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        # curl to service
        res_curl = self.env.get_ssh_to_remote_by_name('slave-01').execute(
            cmd_curl)
        assert_equal(0, res_curl['exit_code'],
                     'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Пример #50
0
    def deploy_nova_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_nova_example_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.d_env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0} '
                         'on node {1}'.format(res_pgrep['stderr'], node))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0} on the '
                         'node {1}'.format(res_pgrep['stderr'], node))
            # curl to service
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0} '
                         'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_nova_example_ha")
Пример #51
0
    def separate_horizon_service(self):
        """Deploy cluster with 3 separate horizon roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with horizon role
            4. Add 1 node with compute role and 2 nodes with ceph-osd role
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_horizon_service
        """
        self.check_run("separate_horizon_service")
        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        checkers.upload_tarball(self.env.d_env.get_admin_remote(),
                                settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH,
                                "/var")

        # install plugins

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'objects_ceph': True,
            'tenant': 'separatehorizon',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-horizon'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
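        # The detach-horizon plugin enabled above provides the
        # 'standalone-horizon' role assigned to slaves 04-06 below.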

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-horizon'],
                'slave-05': ['standalone-horizon'],
                'slave-06': ['standalone-horizon'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_horizon_service", is_make=True)
Пример #52
0
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                CONF.EMC_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(CONF.EMC_PLUGIN_PATH))

        settings = None

        if CONF.NEUTRON_ENABLE:
            settings = {
                "net_provider": 'neutron',
                "net_segment_type": CONF.NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=CONF.DEPLOYMENT_MODE,
            settings=settings
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in ["emc_sp_a_ip", "emc_sp_b_ip",
                       "emc_username", "emc_password", "emc_pool_name"]:
            asserts.assert_true(option in attr["editable"]["emc_vnx"],
                                "{0} is not in cluster attributes: {1}".
                                format(option,
                                       str(attr["editable"]["storage"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = CONF.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = CONF.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = CONF.EMC_USERNAME
        emc_options["emc_password"]["value"] = CONF.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = CONF.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                            for node in ['slave-01', 'slave-02', 'slave-03']]
        compute_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                         for node in ['slave-04', 'slave-05']]

        controller_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                              for node in controller_nodes]
        compute_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                           for node in compute_nodes]

        # check cinder-volume settings

        for remote in controller_remotes:
            self.check_emc_cinder_config(
                remote=remote, path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(remote=remote)

        # check cinder-volume layout on controllers

        cinder_volume_ctrls = [self.check_service(controller, "cinder-volume")
                               for controller in controller_remotes]
        asserts.assert_equal(sum(cinder_volume_ctrls), 1,
                             "Cluster has more than one "
                             "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [self.check_service(compute, "cinder-volume")
                               for compute in compute_remotes]
        # closing connections
        [remote.clear() for remote in controller_remotes]
        [remote.clear() for remote in compute_remotes]

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
Пример #53
0
    def _bootstrap(self):

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugins to the master node
            checkers.upload_tarball(remote, conf.LMA_COLLECTOR_PLUGIN_PATH,
                                    "/var")
            checkers.upload_tarball(remote,
                                    conf.LMA_INFRA_ALERTING_PLUGIN_PATH,
                                    "/var")
            checkers.upload_tarball(remote, conf.INFLUXDB_GRAFANA_PLUGIN_PATH,
                                    "/var")

            # install plugins

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_COLLECTOR_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_INFRA_ALERTING_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        plugins = [
            {
                'name': 'lma_collector',
                'options': {
                    'metadata/enabled': True,
                    'environment_label/value': 'deploy_lma_infra_alerting_ha',
                    'elasticsearch_mode/value': 'disabled',
                    'influxdb_mode/value': 'local',
                    'alerting_mode/value': 'local',
                }
            },
            {
                'name': 'lma_infrastructure_alerting',
                'options': {
                    'metadata/enabled': True,
                    'send_to/value': 'root@localhost',
                    'send_from/value': 'nagios@localhost',
                    'smtp_host/value': '127.0.0.1',
                    'nagios_password/value': self._nagios_password,
                }
            },
            {
                'name': 'influxdb_grafana',
                'options': {
                    'metadata/enabled': True,
                    'influxdb_rootpass/value': 'r00tme',
                    'influxdb_username/value': 'lma',
                    'influxdb_userpass/value': 'pass',
                    'grafana_username/value': 'grafana',
                    'grafana_userpass/value': 'grafanapass',
                }
            },
        ]
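        # Make sure every plugin is registered for the cluster, then apply
        # the options defined above.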
        for plugin in plugins:
            plugin_name = plugin['name']
            msg = "Plugin '%s' couldn't be found. Test aborted" % plugin_name
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('%s plugin is installed' % plugin_name)
            self.fuel_web.update_plugin_data(cluster_id, plugin_name,
                                             plugin['options'])

        return cluster_id