def _prepare_contrail_plugin(self, slaves=None, pub_net=False):
        """Copy necessary packages to the master node and install them"""

        self.env.revert_snapshot("ready_with_{:d}_slaves".format(slaves))

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=CONTRAIL_PLUGIN_PATH,
                tar_target='/var'
            )

            # install plugin
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))

            # copy additional packages to the master node
            self._upload_contrail_packages(remote)

            # install packages
            self._install_packages(remote)

        # prepare fuel
        self._assign_net_provider(pub_net)
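The fuel-qa helpers used throughout these examples are not shown on this page. Below is a minimal sketch of what utils.upload_tarball and utils.install_plugin_check_code are assumed to do (copy the plugin archive to the master node, then run the Fuel CLI install and fail on a non-zero exit code); the function names, the plain ssh/scp transport and the CLI invocation are assumptions for illustration, not the real implementation.

# Hypothetical sketch of the upload/install helpers used above. The real
# fuel-qa utils differ; ssh/scp as root and "fuel plugins --install" are
# assumptions.
import subprocess


def upload_tarball(ip, tar_path, tar_target, user='root'):
    """Copy a local plugin archive to tar_target on the master node."""
    subprocess.run(
        ['scp', tar_path, '{0}@{1}:{2}'.format(user, ip, tar_target)],
        check=True)


def install_plugin_check_code(ip, plugin, user='root'):
    """Install the plugin with the Fuel CLI and assert a zero exit code."""
    result = subprocess.run(
        ['ssh', '{0}@{1}'.format(user, ip),
         'cd /var && fuel plugins --install {0}'.format(plugin)],
        capture_output=True, text=True)
    assert result.returncode == 0, (
        'Plugin installation failed: {0}'.format(result.stderr))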
Example No. 2
def prepare_contrail_plugin(obj, slaves=None, snapshot_name=None, options={}):
    """Copy necessary packages to the master node and install them."""
    if slaves:
        snapshot_name = "ready_with_%d_slaves" % slaves
    obj.env.revert_snapshot(snapshot_name)

    # copy plugin to the master node
    utils.upload_tarball(obj.ssh_manager.admin_ip, CONTRAIL_PLUGIN_PATH,
                         '/var')

    # install plugin
    utils.install_plugin_check_code(
        obj.ssh_manager.admin_ip,
        plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))
    # FIXME: when opencontrail is supported,
    # FIXME: remove 'or True' from the following line
    if obj.CONTRAIL_DISTRIBUTION == 'juniper' or True:
        # copy additional packages to the master node
        upload_contrail_packages(obj)

        # install packages
        install_packages(obj, obj.env.d_env.get_admin_remote())

    # prepare fuel
    openstack.assign_net_provider(obj, **options)
    def install_plugin(self):
        """Install plugin to Fuel"""
        assert_true(self.plugin_path, "plugin_path is not specified")

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(self.plugin_path))
Example No. 4
    def install_plugin(self):
        """Install plugin to Fuel"""
        assert_true(self.plugin_path, "plugin_path is not specified")

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(self.plugin_path))
Example No. 5
    def _bootstrap(self):

        # copy plugin to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH,
                             tar_target="/var")

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        plugin_options = {
            'send_to/value': 'root@localhost',
            'send_from/value': 'nagios@localhost',
            'smtp_host/value': '127.0.0.1',
            'nagios_password/value': self._nagios_password,
        }

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)
        logger.debug('{name:s} ({ver!s}) plugin is installed'
                     ''.format(name=self._name, ver=self._version))
        self.fuel_web.update_plugin_settings(cluster_id, self._name,
                                             self._version, plugin_options)

        return cluster_id
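The plugin_options keys above ('send_to/value' and so on) address nested plugin attributes by a flat path. A small, hypothetical helper showing how such flat path/value pairs can be expanded into the nested structure the Fuel attribute endpoints store (the real update_plugin_settings logic is not shown here and may differ):

def expand_plugin_options(flat_options):
    """Expand flat 'attribute/value' keys into nested dictionaries."""
    nested = {}
    for path, value in flat_options.items():
        keys = path.split('/')
        node = nested
        for key in keys[:-1]:
            node = node.setdefault(key, {})
        node[keys[-1]] = value
    return nested


# expand_plugin_options({'send_to/value': 'root@localhost'})
# -> {'send_to': {'value': 'root@localhost'}}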
def install_dvs_plugin(master_node):
    """Download and instal DVS plugin on master node."""
    # copy plugins to the master node
    utils.upload_tarball(master_node, DVS_PLUGIN_PATH, "/var")

    # install plugin
    utils.install_plugin_check_code(master_node,
                                    os.path.basename(DVS_PLUGIN_PATH))
Example No. 7
    def install_plugin(self):
        """Method designed to install plugin on cluster."""
        master_remote = self.get_remote('master')
        utils.upload_tarball(master_remote.host,
                             os.environ['MISTRAL_PLUGIN_PATH'], '/var')
        utils.install_plugin_check_code(
            master_remote.host,
            os.path.basename(os.environ['MISTRAL_PLUGIN_PATH']))
Example No. 8
def install_dvs_plugin(master_node):
    """Download and instal DVS plugin on master node."""
    # copy plugins to the master node
    utils.upload_tarball(master_node, DVS_PLUGIN_PATH, "/var")

    # install plugin
    utils.install_plugin_check_code(master_node,
                                    os.path.basename(DVS_PLUGIN_PATH))
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.ZABBIX_PLUGIN_PATH,
            tar_target="/var")
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.ZABBIX_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ha")
Example No. 10
def install_manila_plugin(master_node_ip):
    """Install plugin packages to the master node."""

    utils.upload_tarball(
        master_node_ip,
        MANILA_PLUGIN_PATH, "/var")
    utils.install_plugin_check_code(
        master_node_ip,
        os.path.basename(MANILA_PLUGIN_PATH))
Example No. 11
    def install_plugin(self):
        """Method designed to install plugin on cluster."""
        master_remote = self.get_remote('master')
        utils.upload_tarball(master_remote.host,
                             os.environ['MISTRAL_PLUGIN_PATH'],
                             '/var')
        utils.install_plugin_check_code(
            master_remote.host,
            os.path.basename(os.environ['MISTRAL_PLUGIN_PATH']))
Example No. 12
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.ZABBIX_PLUGIN_PATH,
                             tar_target="/var")
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.ZABBIX_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ha")
Example No. 13
    def install_nsxt_plugin(self):
        """Download and install NSX-T plugin on master node.

        :return: None
        """
        master_ip = self.ssh_manager.admin_ip
        utils.upload_tarball(ip=master_ip,
                             tar_path=self.default.NSXT_PLUGIN_PATH,
                             tar_target='/var')

        utils.install_plugin_check_code(
            ip=master_ip,
            plugin=os.path.basename(self.default.NSXT_PLUGIN_PATH))
    def _bootstrap(self):

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH,
            tar_target="/var")

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        plugin_options = {
            'send_to/value': 'root@localhost',
            'send_from/value': 'nagios@localhost',
            'smtp_host/value': '127.0.0.1',
            'nagios_password/value': self._nagios_password,
        }

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)
        logger.debug(
            '{name:s} ({ver!s}) plugin is installed'
            ''.format(name=self._name, ver=self._version))
        self.fuel_web.update_plugin_settings(cluster_id,
                                             self._name,
                                             self._version,
                                             plugin_options)

        return cluster_id
Example No. 15
    def deploy_neutron_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_neutron_example_ha

        """
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(
                    0, res_pgrep['exit_code'], 'Failed with error {0} '
                    'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(
                    1, len(res_pgrep['stdout']),
                    'Failed with error {0} on the '
                    'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(
                    0, res_curl['exit_code'], 'Failed with error {0} '
                    'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha")
Example No. 16
    def deploy_influxdb_grafana_plugin(self):
        """Deploy a cluster with the InfluxDB-Grafana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with influxdb_grafana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_influxdb_grafana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node and install it
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=INFLUXDB_GRAFANA_PLUGIN_PATH,
                             tar_target='/var')
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'influxdb_grafana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_influxdb_grafana',
            'influxdb_rootpass/value': 'lmapass',
            'influxdb_userpass/value': 'lmapass',
            'grafana_userpass/value': 'lmapass',
        }

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            "Plugin couldn't be enabled. Check plugin version. Test aborted")

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['influxdb_grafana']
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)

        influxdb_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        influxdb_server_ip = influxdb_server.get('ip')
        assert_is_not_none(influxdb_server_ip,
                           "Failed to get the IP of InfluxDB server")

        logger.debug("Check that InfluxDB is ready")

        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        r = requests.get(
            influxdb_url.format(influxdb_server_ip, 'lma',
                                options['influxdb_userpass/value']))
        msg = "InfluxDB responded with {}, expected 200".format(r.status_code)
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the Grafana server is running")

        r = requests.get("http://{0}:{1}@{2}:8000/api/org".format(
            'grafana', options['grafana_userpass/value'], influxdb_server_ip))
        msg = "Grafana server responded with {}, expected 200".format(
            r.status_code)
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_influxdb_grafana_plugin")
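The checks above issue a single GET against InfluxDB and Grafana right after deployment. If the services need time to come up, a small retry wrapper could poll until a 200 is returned or a deadline passes; this is only a sketch, not part of fuel-qa:

import time

import requests


def wait_http_200(url, timeout=300, interval=10):
    """Poll url until it answers HTTP 200 or the timeout expires."""
    deadline = time.time() + timeout
    last_status = None
    while time.time() < deadline:
        try:
            last_status = requests.get(url).status_code
            if last_status == 200:
                return
        except requests.RequestException:
            pass
        time.sleep(interval)
    raise AssertionError(
        '{0} did not return 200 within {1}s (last status: {2})'.format(
            url, timeout, last_status))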
    def deploy_cluster_with_reboot_plugin_timeout(self):
        """Check deployment is failed by reboot task plugin.

        Scenario:
            1. Revert snapshot with 3 nodes
            2. Download and install fuel-plugin-builder
            3. Create plugin with reboot task,
               set timeout for reboot task as 1 second
            4. Build plugin
            5. Install plugin to fuel
            6. Create cluster and enable plugin
            7. Provision nodes
            8. Deploy cluster
            9. Check that deployment failed because of the reboot task
            10. Check the error message in the logs

        Duration 15m
        """
        # define some plugin related variables
        plugin_name = 'timeout_plugin'
        source_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        tasks_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'reboot_tasks.yaml'
        # start reverting snapshot
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")
        # let's get ssh client for the master node
        self.show_step(2)
        # initiate fuel plugin builder instance
        fpb = FuelPluginBuilder()
        # install fuel_plugin_builder on master node
        fpb.fpb_install()
        # change timeout to a new value '1'
        fpb.put_value_to_local_yaml(os.path.join(tasks_path, tasks_file),
                                    os.path.join('/tmp/', tasks_file),
                                    [1, 'parameters', 'timeout'], 1)
        self.show_step(3)
        # create plugin template on the master node
        fpb.fpb_create_plugin(source_plugin_path)
        # replace plugin tasks with our file
        fpb.fpb_replace_plugin_content(
            os.path.join('/tmp/', tasks_file),
            os.path.join(source_plugin_path, 'tasks.yaml'))
        # build plugin
        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)
        # copy plugin archive file
        # to the /var directory on the master node
        fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name),
                            plugin_path)
        # let's install plugin
        self.show_step(5)
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.join(
                                            plugin_path, packet_name))
        # create cluster
        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable it
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller', 'ceph-osd']})
        self.show_step(7)
        self.fuel_web.provisioning_cluster_wait(cluster_id)
        logger.info('Start cluster #%s deployment', cluster_id)
        self.show_step(8)
        task = self.fuel_web.client.deploy_nodes(cluster_id)
        self.show_step(9)
        self.fuel_web.assert_task_failed(task)

        msg = 'Time detection (1 sec) for node reboot has expired'
        cmd = 'grep "{0}" /var/log/astute/astute.log'.format(msg)
        self.show_step(10)
        with self.env.d_env.get_admin_remote() as admin_remote:
            result = admin_remote.execute(cmd)['stdout'][0]

        asserts.assert_true(
            msg in result,
            'Failed to find reboot plugin warning message in logs')
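fpb.put_value_to_local_yaml above is assumed to load a YAML file, follow a path of indexes/keys such as [1, 'parameters', 'timeout'], set the value and write the result to a new file. A minimal sketch of that assumed behaviour, using PyYAML:

import yaml


def put_value_to_local_yaml(src_path, dst_path, key_path, value):
    """Set the element addressed by key_path (a list of list indexes and
    dict keys) to value and dump the modified document to dst_path."""
    with open(src_path) as src:
        data = yaml.safe_load(src)
    node = data
    for key in key_path[:-1]:
        node = node[key]
    node[key_path[-1]] = value
    with open(dst_path, 'w') as dst:
        yaml.safe_dump(data, dst, default_flow_style=False)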
Example No. 18
    def deploy_influxdb_grafana_plugin(self):
        """Deploy a cluster with the InfluxDB-Grafana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with influxdb_grafana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_influxdb_grafana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node and install it
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=INFLUXDB_GRAFANA_PLUGIN_PATH,
            tar_target='/var')
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'influxdb_grafana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_influxdb_grafana',
            'influxdb_rootpass/value': 'lmapass',
            'influxdb_userpass/value': 'lmapass',
            'grafana_userpass/value': 'lmapass',
        }

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            "Plugin couldn't be enabled. Check plugin version. Test aborted")

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['influxdb_grafana']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        influxdb_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        influxdb_server_ip = influxdb_server.get('ip')
        assert_is_not_none(influxdb_server_ip,
                           "Failed to get the IP of InfluxDB server")

        logger.debug("Check that InfluxDB is ready")

        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        r = requests.get(influxdb_url.format(
            influxdb_server_ip, 'lma', options['influxdb_userpass/value']))
        msg = "InfluxDB responded with {}, expected 200".format(r.status_code)
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the Grafana server is running")

        r = requests.get(
            "http://{0}:{1}@{2}:8000/api/org".format(
                'grafana', options['grafana_userpass/value'],
                influxdb_server_ip))
        msg = "Grafana server responded with {}, expected 200".format(
            r.status_code)
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_influxdb_grafana_plugin")
Example No. 19
    def etckeeper_plugin(self):
        """Check tracking /etc dir by etckeeper plugin

        Scenario:
        1. Revert snapshot with 1 node
        2. Download and install fuel-plugin-builder
        3. Clone plugin repo
        4. Build plugin
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check plugin

        Duration 50m
        """
        plugin_name = 'fuel-plugin-etckeeper'
        plugin_path = '/var'
        source_plugin_path = os.path.join(plugin_path, plugin_name)

        self.show_step(1)
        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(2)
        fpb = FuelPluginBuilder()
        fpb.fpb_install()

        ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute_on_remote(ip=ip,
                                           cmd='git clone {0} {1}'.format(
                                               ETCKEEPER_PLUGIN_REPO,
                                               source_plugin_path))

        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)

        self.show_step(5)
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.join(
                                            source_plugin_path, packet_name))

        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={'propagate_task_deploy': True})

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(cluster_id, {'slave-01': ['controller']})

        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
        etckeeper_status = self.ssh_manager.execute_on_remote(
            ip=ip, cmd='etckeeper vcs status')
        if 'branch master' not in etckeeper_status['stdout_str']:
            raise Exception("etckeeper reports an unexpected status: "
                            "{0}".format(etckeeper_status['stdout_str']))

        new_config = 'test_config'
        self.ssh_manager.execute_on_remote(
            ip=ip, cmd='>>{0}'.format(os.path.join('/etc', new_config)))

        etckeeper_status = self.ssh_manager.execute_on_remote(
            ip=ip, cmd='etckeeper vcs status')
        if new_config not in etckeeper_status['stdout_str']:
            raise Exception(
                "etckeeper did not track the newly added config {0}, "
                "actual status: {1}".format(new_config,
                                            etckeeper_status['stdout_str']))
Example No. 20
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.EMC_PLUGIN_PATH,
            tar_target='/var'
        )

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.EMC_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in ["emc_sp_a_ip", "emc_sp_b_ip",
                       "emc_username", "emc_password", "emc_pool_name"]:
            asserts.assert_true(option in attr["editable"]["emc_vnx"],
                                "{0} is not in cluster attributes: {1}".
                                format(option,
                                       str(attr["editable"]["emc_vnx"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = settings.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = settings.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = settings.EMC_USERNAME
        emc_options["emc_password"]["value"] = settings.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = settings.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                            for node in ['slave-01', 'slave-02', 'slave-03']]
        compute_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                         for node in ['slave-04', 'slave-05']]

        controller_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                              for node in controller_nodes]
        compute_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                           for node in compute_nodes]

        # check cinder-volume settings

        for node in controller_nodes:
            self.check_emc_cinder_config(
                ip=node['ip'], path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(ip=node['ip'])

        # check cinder-volume layout on controllers

        cinder_volume_ctrls = [self.check_service(controller, "cinder-volume")
                               for controller in controller_remotes]
        asserts.assert_equal(sum(cinder_volume_ctrls), 1,
                             "Cluster has more than one "
                             "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [self.check_service(compute, "cinder-volume")
                               for compute in compute_remotes]
        # closing connections
        for remote in controller_remotes:
            remote.clear()
        for remote in compute_remotes:
            remote.clear()

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
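check_service above is summed over the remotes, so it is assumed to return 1 when a cinder-volume process runs on the node and 0 otherwise. A hedged sketch of such a check for a remote object whose execute() returns a dict with an 'exit_code' key:

def check_service(remote, service_name):
    """Return 1 if a process matching service_name is running, else 0."""
    result = remote.execute('pgrep -f {0}'.format(service_name))
    return 1 if result['exit_code'] == 0 else 0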
Example No. 21
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=ELASTICSEARCH_KIBANA_PLUGIN_PATH,
            tar_target='/var'
        )

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, self._name),
            msg)

        self.fuel_web.update_plugin_settings(cluster_id, self._name,
                                             self._version, {})

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': [self._role_name]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server_ip = self.get_vip(cluster_id)
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticsearch is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
Example No. 22
    def separate_horizon_service(self):
        """Deploy cluster with 3 separate horizon roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with horizon role
            4. Add 1 node with compute role and 2 nodes with ceph-osd role
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_horizon_service
        """
        self.check_run("separate_horizon_service")
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_HORIZON_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '2',
            'tenant': 'separatehorizon',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-horizon'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-horizon'],
                'slave-05': ['standalone-horizon'],
                'slave-06': ['standalone-horizon'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_horizon_service", is_make=True)
    def deploy_zabbix_ceph_radosgw_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller,ceph-osd roles
            5. Add 2 nodes with compute,ceph-osd roles
            6. Deploy the cluster
            7. Run network verification
            8. Run OSTF
            9. Check zabbix service in pacemaker
            10. Check login to zabbix dashboard

        Duration 180m
        Snapshot deploy_zabbix_ceph_radosgw_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.ZABBIX_PLUGIN_PATH,
            tar_target="/var")
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.ZABBIX_PLUGIN_PATH))

        cluster_settings = {
            "net_provider": "neutron",
            "net_segment_type": settings.NEUTRON_SEGMENT_TYPE,
            'objects_ceph': True,
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            'tenant': 'rados',
            'user': '******',
            'password': '******',
            'osd_pool_size': "3"
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=cluster_settings
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ceph_radosgw_ha")
Example No. 24
    def deploy_zabbix_ceph_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller,ceph-osd roles
            5. Add 2 nodes with compute,ceph-osd roles
            6. Deploy the cluster
            7. Run network verification
            8. Run OSTF
            9. Check zabbix service in pacemaker
            10. Check login to zabbix dashboard

        Duration 180m
        Snapshot deploy_zabbix_ceph_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.ZABBIX_PLUGIN_PATH,
                             tar_target="/var")
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.ZABBIX_PLUGIN_PATH))

        cluster_settings = {
            "net_provider": "neutron",
            "net_segment_type": settings.NEUTRON_SEGMENT_TYPE,
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            'tenant': 'cephHA',
            'user': '******',
            'password': '******',
            'osd_pool_size': "3"
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=cluster_settings)

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
            })

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ceph_ha")
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH
        )
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_EXTREME_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH
        )
        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [settings.ZABBIX_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH]:
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=plugin,
                tar_target="/var")
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(
            zabbix_web, 'extreme', 'Power Supply Failed'),
            timeout_msg='Power Supply Failed event not found in Zabbix')

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [settings.ZABBIX_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_PLUGIN_PATH]:
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=plugin,
                tar_target="/var")
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0,
                 timeout_msg='SNMP heartbeat status not found '
                             ' in /var/log/zabbix/zabbix_server.log')

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
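The last step re-runs a grep over SSH until the heartbeat trap shows up in zabbix_server.log. The same pattern as a small reusable helper (a sketch; the wait() used above comes from fuel-qa's own helpers):

import time


def wait_for_log_line(remote, pattern, log_path, timeout=300, interval=10):
    """Re-run grep on the remote until pattern appears in log_path."""
    cmd = 'grep "{0}" {1}'.format(pattern, log_path)
    deadline = time.time() + timeout
    while time.time() < deadline:
        if remote.execute(cmd)['exit_code'] == 0:
            return
        time.sleep(interval)
    raise AssertionError(
        '"{0}" not found in {1} within {2}s'.format(
            pattern, log_path, timeout))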
    def vip_reservation_for_plugin_custom_ns(self):
        """Check vip reservation for custom ns plugin

        Scenario:
        1. Revert snapshot with 3 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with predefined network_roles.yaml
        4. Build and copy plugin to /var
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check vip reservation

        Duration 40m
        """
        plugin_name = "vip_reservation_plugin"
        source_plugin_path = os.path.join("/root/", plugin_name)
        plugin_path = "/var"
        task_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = "tasks.yaml"
        net_role_file = "network_roles.yaml"
        metadata_file = "metadata.yaml"
        namespace = "custom_ns"
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(2)
        # initiate fuel plugin builder instance
        fpb = FuelPluginBuilder()
        # install fuel_plugin_builder on master node
        fpb.fpb_install()
        # create plugin template on the master node
        self.show_step(3)
        fpb.fpb_create_plugin(source_plugin_path)
        # replace plugin tasks, metadata, network_roles
        fpb.fpb_replace_plugin_content(
            os.path.join(task_path, net_role_file), os.path.join(source_plugin_path, net_role_file)
        )
        fpb.fpb_replace_plugin_content(
            os.path.join(task_path, tasks_file), os.path.join(source_plugin_path, tasks_file)
        )
        fpb.fpb_replace_plugin_content(
            os.path.join(task_path, metadata_file), os.path.join(source_plugin_path, metadata_file)
        )

        with YamlEditor(os.path.join(source_plugin_path, net_role_file), ip=fpb.admin_ip) as editor:
            editor.content[0]["properties"]["vip"][0]["namespace"] = namespace
            editor.content[1]["properties"]["vip"][0]["namespace"] = namespace
        # build plugin
        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)
        # copy plugin archive file
        # to the /var directory on the master node
        fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name), plugin_path)
        self.show_step(5)
        # let's install plugin
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip, plugin=os.path.join(plugin_path, packet_name))
        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__, mode=DEPLOYMENT_MODE)
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {"metadata/enabled": True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info("Cluster is {!s}".format(cluster_id))

        self.fuel_web.update_nodes(cluster_id, {"slave-01": ["controller"], "slave-02": ["compute"]})
        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            hiera_json_out = (
                'ruby -rhiera -rjson -e "h = Hiera.new(); '
                "Hiera.logger = 'noop'; "
                "puts JSON.dump(h.lookup('network_metadata', "
                '[], {}, nil, nil))"'
            )
            for vip in ("reserved_pub", "reserved_mng"):
                # get vips from hiera
                vip_hiera = json.loads(remote.execute(hiera_json_out)["stdout"][0])["vips"][vip]["ipaddr"]
                # get vips from database
                vip_db = self.env.postgres_actions.run_query(
                    db="nailgun",
                    query="select ip_addr from ip_addrs where " "vip_name = '\"'\"'{0}'\"'\"';".format(vip),
                )
                # get vips from corosync
                vip_crm = remote.execute("crm_resource --resource {0}{1} --get-parameter=ip".format("vip__", vip))[
                    "stdout"
                ][0].rstrip()
                # get vips from namespace
                vip_ns = (
                    remote.execute("ip netns exec {0} ip -4 a show {1}{2}".format(namespace, "b_", vip))["stdout"][1]
                    .split(" ")[5]
                    .split("/")[0]
                )
                vip_array = [vip_hiera, vip_db, vip_crm, vip_ns]
                for ip in vip_array[1:]:
                    asserts.assert_equal(
                        vip_array[0], ip, "Vip from hiera output {0} does not equal " "to {1}".format(vip_array[0], ip)
                    )
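The final loop compares the VIP reported by hiera against the database, corosync and namespace values one by one. The same consistency assertion expressed as a small helper over a labelled mapping (a sketch, not fuel-qa code):

def assert_vips_consistent(vips_by_source):
    """vips_by_source maps a source label (hiera, db, crm, ns) to the VIP
    address it reports; all sources must agree."""
    if len(set(vips_by_source.values())) != 1:
        raise AssertionError(
            'VIP mismatch across sources: {0}'.format(vips_by_source))


# assert_vips_consistent({'hiera': vip_hiera, 'db': vip_db,
#                         'crm': vip_crm, 'ns': vip_ns})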
Example No. 28
    def deploy_cluster_with_reboot_plugin(self):
        """Add pre-deployment reboot task to nailgun via plugin.

        Scenario:
        1. Revert snapshot with 5 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with reboot task
        4. Build plugin and copy it in var directory
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Provision nodes
        8. Collect timestamps from nodes
        9. Deploy cluster
        10. Check if timestamps are changed

        Duration 40m
        """
        # define some plugin related variables
        plugin_name = 'reboot_plugin'
        source_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        tasks_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'reboot_tasks.yaml'
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves")
        # let's get ssh client for the master node

        # initiate fuel plugin builder instance
        self.show_step(2)
        fpb = FuelPluginBuilder()
        # install fuel_plugin_builder on master node
        fpb.fpb_install()
        # create plugin template on the master node
        self.show_step(3)
        fpb.fpb_create_plugin(source_plugin_path)
        # replace plugin tasks with our file
        fpb.fpb_replace_plugin_content(
            os.path.join(tasks_path, tasks_file),
            os.path.join(source_plugin_path, 'tasks.yaml'))
        # build plugin
        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)
        fpb.fpb_copy_plugin(
            os.path.join(source_plugin_path, packet_name), plugin_path)
        self.show_step(5)
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.join(plugin_path, packet_name))
        self.show_step(6)
        # create cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['compute'],
                'slave-04': ['ceph-osd']}
        )
        # firstly, let's provision nodes
        self.show_step(7)
        self.fuel_web.provisioning_cluster_wait(cluster_id)
        # after provision is done, collect timestamps from nodes
        old_timestamps = {}

        nodes = {
            'slave-01': True,
            'slave-02': True,
            'slave-03': False,
            'slave-04': True
        }
        self.show_step(8)
        for node in nodes:
            logger.debug(
                "Get init object creation time from node {0}".format(node))
            cmd = 'stat --printf=\'%Y\' /proc/1'
            with self.fuel_web.get_ssh_for_node(node) as node_ssh:
                old_timestamps[node] = node_ssh.execute(cmd)['stdout'][0]

        # start deploying nodes
        # here nodes with controller and ceph roles should be rebooted
        self.show_step(9)
        self.fuel_web.deploy_cluster_wait_progress(cluster_id, 30)

        # collect new timestamps and check them
        self.show_step(10)
        for node in nodes:
            logger.debug(
                "Get init object creation time from node {0}".format(node))
            cmd = 'stat --printf=\'%Y\' /proc/1'
            with self.fuel_web.get_ssh_for_node(node) as node_ssh:
                new_timestamp = node_ssh.execute(cmd)['stdout'][0]
            # compute node without ceph role shouldn't reboot
            if not nodes[node]:
                asserts.assert_equal(
                    new_timestamp, old_timestamps[node],
                    'The new timestamp {0} is not equal to old one {1}, '
                    'but it shouldn\'t for {2} node'
                    .format(new_timestamp, old_timestamps[node], node)
                )
            else:
                # other nodes should be rebooted and have new timestamps
                # greater than old
                asserts.assert_true(
                    new_timestamp > old_timestamps[node],
                    'The new timestamp {0} is not greater than old one {1} '
                    'but it should for node {2}'
                    .format(new_timestamp, old_timestamps[node], node)
                )
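
# Illustrative sketch (not part of the original test): the reboot check above
# relies on /proc/1 being recreated by init at boot, so the value returned by
# "stat --printf='%Y' /proc/1" only moves forward after a reboot. A local
# equivalent, assuming a Linux host with procfs mounted:
import os

def init_start_timestamp():
    """Return the modification time of /proc/1, roughly when PID 1 started."""
    return int(os.stat('/proc/1').st_mtime)

# A node counts as rebooted when the timestamp collected after deployment is
# strictly greater than the one collected right after provisioning.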
Exemplo n.º 29
    def deploy_ha_one_controller_glusterfs_simple(self):
        """Deploy cluster with one controller and glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_glusterfs
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=GLUSTER_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'external_glusterfs'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {
            'metadata/enabled': True,
            'endpoint/value': GLUSTER_CLUSTER_ENDPOINT
        }
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-03'):
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                self.check_glusterfs_conf(
                    remote=remote,
                    path='/etc/cinder/glusterfs',
                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
Exemplo n.º 30
    def deploy_ha_one_controller_glusterfs_simple(self):
        """Deploy cluster with one controller and glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_glusterfs
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=GLUSTER_PLUGIN_PATH,
            tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'external_glusterfs'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True,
                   'endpoint/value': GLUSTER_CLUSTER_ENDPOINT}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-03'):
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                self.check_glusterfs_conf(
                    remote=remote,
                    path='/etc/cinder/glusterfs',
                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
Exemplo n.º 31
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        check_plugin_path_env(var_name='ZABBIX_SNMP_PLUGIN_PATH',
                              plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH)
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_EXTREME_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH)
        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [
                settings.ZABBIX_PLUGIN_PATH, settings.ZABBIX_SNMP_PLUGIN_PATH,
                settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH
        ]:
            utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                                 tar_path=plugin,
                                 tar_target="/var")
            utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                            plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(zabbix_web, 'extreme',
                                              'Power Supply Failed'))

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
    def deploy_lma_toolchain(self):
        """Deploy cluster in HA mode with the LMA toolchain

        This also deploys the Elasticsearch-Kibana plugin and the
        InfluxDB-Grafana plugin since they work together with the LMA collector
        plugin.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute + cinder role
            6. Add 1 node with influxdb_grafana + elasticsearch_kibana +
               infrastructure_alerting roles
            7. Deploy the cluster
            8. Check that the plugins work
            9. Run OSTF

        Duration 150m
        Snapshot deploy_lma_toolchain

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # TODO(scroiset): use actions fuel_actions.py
        # upload_plugin and install_plugin
        # copy plugins to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.LMA_COLLECTOR_PLUGIN_PATH,
                             tar_target="/var")
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH,
            tar_target="/var")
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.INFLUXDB_GRAFANA_PLUGIN_PATH,
                             tar_target="/var")
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH,
                             tar_target="/var")

        # install plugins
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_COLLECTOR_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.INFLUXDB_GRAFANA_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        influxdb_user = "******"
        influxdb_pass = "******"
        influxdb_rootpass = "******"
        grafana_user = "******"
        grafana_pass = "******"
        mysql_dbname = "grafanalma"
        mysql_user = "******"
        mysql_pass = "******"
        nagios_pass = "******"
        plugins = [
            {
                'name': 'lma_collector',
                'version': '0.9.0',
                'options': {
                    'environment_label/value': 'deploy_lma_toolchain',
                    'elasticsearch_mode/value': 'local',
                    'influxdb_mode/value': 'local',
                    'alerting_mode/value': 'local',
                }
            },
            {
                'name': 'elasticsearch_kibana',
                'version': '0.9.0',
                'options': {}
            },
            {
                'name': 'lma_infrastructure_alerting',
                'version': '0.9.0',
                'options': {
                    'send_to/value': 'root@localhost',
                    'send_from/value': 'nagios@localhost',
                    'smtp_host/value': '127.0.0.1',
                    'nagios_password/value': nagios_pass,
                }
            },
            {
                'name': 'influxdb_grafana',
                'version': '0.9.0',
                'options': {
                    'influxdb_rootpass/value': influxdb_rootpass,
                    'influxdb_username/value': influxdb_user,
                    'influxdb_userpass/value': influxdb_pass,
                    'grafana_username/value': grafana_user,
                    'grafana_userpass/value': grafana_pass,
                    'mysql_mode/value': 'local',
                    'mysql_dbname/value': mysql_dbname,
                    'mysql_username/value': mysql_user,
                    'mysql_password/value': mysql_pass,
                }
            },
        ]
        for plugin in plugins:
            plugin_name = plugin['name']
            plugin_version = plugin['version']
            msg = "Plugin '{:s}' couldn't be found. " \
                  "Test aborted".format(plugin_name)
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('{:s} plugin is installed'.format(plugin_name))
            self.fuel_web.update_plugin_settings(cluster_id, plugin_name,
                                                 plugin_version,
                                                 plugin['options'])

        analytics_roles = [
            "influxdb_grafana", "elasticsearch_kibana",
            "infrastructure_alerting"
        ]
        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute", "cinder"],
                "slave-05": analytics_roles,
            })
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=9000)

        analytics_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, analytics_roles)
        msg = "One node with '{}' roles must be present, found {}".format(
            ' + '.join(analytics_roles), len(analytics_nodes))

        assert_true(len(analytics_nodes) == 1, msg)

        elasticsearch_kibana_vip = self.get_vip(cluster_id, 'es_vip_mgmt')
        influxdb_grafana_vip = self.get_vip(cluster_id, 'influxdb')
        nagios_vip = self.get_vip(cluster_id, 'infrastructure_alerting')
        assert_is_not_none(
            elasticsearch_kibana_vip,
            "Fail to retrieve the Elasticsearch/Kibana cluster VIP address")
        assert_is_not_none(
            influxdb_grafana_vip,
            "Fail to retrieve the InfluxDB/Grafana cluster VIP address")
        assert_is_not_none(
            nagios_vip,
            "Fail to retrieve the Infrastructure Alerting cluster VIP address")

        def assert_http_get_response(url, expected=200):
            r = requests.get(url)
            assert_equal(
                r.status_code, expected,
                "{} responded with {}, expected {}".format(
                    url, r.status_code, expected))

        logger.debug("Check that Elasticsearch is ready")
        assert_http_get_response(
            "http://{0}:9200/".format(elasticsearch_kibana_vip))

        logger.debug("Check that Kibana is ready")
        assert_http_get_response(
            "http://{0}/".format(elasticsearch_kibana_vip))

        logger.debug("Check that the root user can access InfluxDB")
        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        assert_http_get_response(
            influxdb_url.format(influxdb_grafana_vip, 'root',
                                influxdb_rootpass))
        logger.debug("Check that the LMA user can access InfluxDB")
        assert_http_get_response(
            influxdb_url.format(influxdb_grafana_vip, influxdb_user,
                                influxdb_pass))

        logger.debug("Check that the LMA user can access Grafana")
        assert_http_get_response("http://{0}:{1}@{2}:8000/api/org".format(
            grafana_user, grafana_pass, influxdb_grafana_vip))

        nagios_url = "http://{}:{}".format(nagios_vip, '8001')
        r = requests.get(nagios_url, auth=('nagiosadmin', nagios_pass))
        assert_equal(
            r.status_code, 200,
            "Nagios HTTP response code {}, expected {}".format(
                r.status_code, 200))
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_lma_toolchain")
Exemplo n.º 33
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_V3_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_V3_PATH)

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_V3_PATH,
                             tar_target='/var')
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True})

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that fuel_plugin_example_v3_puppet is called
        # between netconfig and connectivity_tests
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str, plugin_str, connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that the service is running on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.ssh_manager.execute_on_remote(ip=slave3['ip'],
                                                       cmd=cmd)
        process_count = len(res_pgrep['stdout'])
        assert_equal(
            1, process_count,
            "There should be 1 process 'fuel-simple-service',"
            " but {0} found {1} processes".format(cmd, process_count))

        # curl to service
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(ip=slave3['ip'], cmd=cmd_curl)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
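
# Illustrative sketch (an assumption, not the real
# checkers.check_log_lines_order helper): verifying that markers appear in a
# log in a given order reduces to finding each marker at a position strictly
# after the previous match.
def check_lines_order_sketch(log_text, markers):
    position = -1
    for marker in markers:
        position = log_text.find(marker, position + 1)
        assert position != -1, (
            'Marker {0!r} is missing or out of order'.format(marker))

# usage:
# check_lines_order_sketch(puppet_log, [netconfig_str, plugin_str, connect_str])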
Exemplo n.º 34
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=ELASTICSEARCH_KIBANA_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)

        self.fuel_web.update_plugin_settings(cluster_id, self._name,
                                             self._version, {})

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': [self._role_name]
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server_ip = self.get_vip(cluster_id)
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticsearch is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
Exemplo n.º 35
    def separate_db_ceph_service(self):
        """Deployment with separate db nodes and ceph for all

        Scenario:
            1. Install the plugin on the master node
            2. Create Ubuntu, Neutron VXLAN, ceph for all cluster
            3. Change ceph replication factor to 2
            4. Add 3 nodes with controller role
            5. Add 3 nodes with db role
            6. Add 1 compute node
            7. Add 2 ceph nodes
            8. Run network verification
            9. Deploy changes
            10. Run network verification
            11. Run OSTF tests

        Duration 120m
        Snapshot separate_db_ceph_service
        """
        self.check_run("separate_db_ceph_service")
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'osd_pool_size': '2',
            'objects_ceph': True,
            'tenant': 'separatedbceph',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-database'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database'],
                'slave-05': ['standalone-database'],
                'slave-06': ['standalone-database'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_db_ceph_service")
    def separate_keystone_ceph_service(self):
        """Deployment with separate keystone nodes and ceph for all

        Scenario:
            1. Install database and keystone plugins on the master node
            2. Create Ubuntu, Neutron VXLAN, ceph for all storages cluster
            3. Change ceph replication factor to 2
            4. Add 3 nodes with controller role
            5. Add 3 nodes with database+keystone role
            6. Add 1 compute node
            7. Add 2 ceph nodes
            8. Run network verification
            9. Deploy changes
            10. Run network verification
            11. Run OSTF tests

        Duration 120m
        Snapshot separate_keystone_ceph_service
        """
        self.check_run("separate_keystone_ceph_service")

        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH)
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH)
        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
                             tar_target="/var")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '2',
            'tenant': 'separatekeystoneceph',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_names = ['detach-database', 'detach-keystone']
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        for plugin_name in plugin_names:
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            options = {'metadata/enabled': True}
            self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database', 'standalone-keystone'],
                'slave-05': ['standalone-database', 'standalone-keystone'],
                'slave-06': ['standalone-database', 'standalone-keystone'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_keystone_ceph_service")
Exemplo n.º 37
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        check_plugin_path_env(var_name='ZABBIX_SNMP_PLUGIN_PATH',
                              plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [
                settings.ZABBIX_PLUGIN_PATH, settings.ZABBIX_SNMP_PLUGIN_PATH
        ]:
            utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                                 tar_path=plugin,
                                 tar_target="/var")
            utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                            plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0)

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
    def deploy_cluster_with_reboot_plugin(self):
        """Add pre-deployment reboot task to nailgun via plugin.

        Scenario:
        1. Revert snapshot with 5 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with reboot task
        4. Build plugin and copy it in var directory
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Provision nodes
        8. Collect timestamps from nodes
        9. Deploy cluster
        10. Check if timestamps are changed

        Duration 40m
        """
        # define some plugin related variables
        plugin_name = 'reboot_plugin'
        source_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        tasks_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'reboot_tasks.yaml'
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves")
        # let's get ssh client for the master node

        # initiate fuel plugin builder instance
        self.show_step(2)
        fpb = FuelPluginBuilder()
        # install fuel_plugin_builder on master node
        fpb.fpb_install()
        # create plugin template on the master node
        self.show_step(3)
        fpb.fpb_create_plugin(source_plugin_path)
        # replace plugin tasks with our file
        fpb.fpb_replace_plugin_content(
            os.path.join(tasks_path, tasks_file),
            os.path.join(source_plugin_path, 'tasks.yaml'))
        # build plugin
        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)
        fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name),
                            plugin_path)
        self.show_step(5)
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.join(
                                            plugin_path, packet_name))
        self.show_step(6)
        # create cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['compute'],
                'slave-04': ['ceph-osd']
            })
        # firstly, let's provision nodes
        self.show_step(7)
        self.fuel_web.provisioning_cluster_wait(cluster_id)
        # after provision is done, collect timestamps from nodes
        old_timestamps = {}

        nodes = {
            'slave-01': True,
            'slave-02': True,
            'slave-03': False,
            'slave-04': True
        }
        self.show_step(8)
        for node in nodes:
            logger.debug(
                "Get init object creation time from node {0}".format(node))
            cmd = 'stat --printf=\'%Y\' /proc/1'
            with self.fuel_web.get_ssh_for_node(node) as node_ssh:
                old_timestamps[node] = node_ssh.execute(cmd)['stdout'][0]

        # start deploying nodes
        # here nodes with controller and ceph roles should be rebooted
        self.show_step(9)
        self.fuel_web.deploy_cluster_wait_progress(cluster_id, 30)

        # collect new timestamps and check them
        self.show_step(10)
        for node in nodes:
            logger.debug(
                "Get init object creation time from node {0}".format(node))
            cmd = 'stat --printf=\'%Y\' /proc/1'
            with self.fuel_web.get_ssh_for_node(node) as node_ssh:
                new_timestamp = node_ssh.execute(cmd)['stdout'][0]
            # compute node without ceph role shouldn't reboot
            if not nodes[node]:
                asserts.assert_equal(
                    new_timestamp, old_timestamps[node],
                    'The new timestamp {0} is not equal to old one {1}, '
                    'but it shouldn\'t for {2} node'.format(
                        new_timestamp, old_timestamps[node], node))
            else:
                # other nodes should be rebooted and have new timestamps
                # greater than old
                asserts.assert_true(
                    new_timestamp > old_timestamps[node],
                    'The new timestamp {0} is not greater than old one {1} '
                    'but it should for node {2}'.format(
                        new_timestamp, old_timestamps[node], node))
Exemplo n.º 39
    def vip_reservation_for_plugin(self):
        """Check vip reservation for fuel plugin

        Scenario:
        1. Revert snapshot with 3 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with predefined network_roles.yaml
        4. Build and copy plugin to /var directory
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check vip reservation

        Duration 40m
        """
        plugin_name = 'vip_reservation_plugin'
        source_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        dir_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'tasks.yaml'
        net_role_file = 'network_roles.yaml'
        metadata_file = 'metadata.yaml'
        namespace = 'haproxy'

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")
        # initiate fuel plugin builder instance
        fpb = FuelPluginBuilder()
        # install fuel_plugin_builder on master node
        self.show_step(2)
        fpb.fpb_install()
        # create plugin template on the master node
        self.show_step(3)
        fpb.fpb_create_plugin(source_plugin_path)
        # replace plugin tasks, metadata, network_roles
        fpb.fpb_replace_plugin_content(
            os.path.join(dir_path, net_role_file),
            os.path.join(source_plugin_path, net_role_file))
        fpb.fpb_replace_plugin_content(
            os.path.join(dir_path, tasks_file),
            os.path.join(source_plugin_path, tasks_file))
        fpb.fpb_replace_plugin_content(
            os.path.join(dir_path, metadata_file),
            os.path.join(source_plugin_path, metadata_file))
        # build plugin
        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)
        # copy plugin archive file from nailgun container
        # to the /var directory on the master node
        fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name),
                            plugin_path)
        # let's install plugin
        self.show_step(5)
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.join(plugin_path, packet_name))
        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute']}
        )
        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \
                             "Hiera.logger = 'noop'; puts JSON.dump " \
                             "(h.lookup('network_metadata', " \
                             "[], {}, nil, nil))\""
            for vip in ('reserved_pub', 'reserved_mng'):
                # get vips from hiera
                vip_hiera = json.loads(
                    remote.execute(
                        hiera_json_out)['stdout'][0])["vips"][vip]["ipaddr"]
                # get vips from database
                vip_db = self.env.postgres_actions.run_query(
                    db='nailgun',
                    query="select ip_addr from ip_addrs where "
                          "vip_name = '\"'\"'{0}'\"'\"';".format(vip))
                vip_array = [vip_hiera, vip_db]
                for ip in vip_array[1:]:
                    asserts.assert_equal(
                        vip_array[0], ip,
                        "Vip from hiera output {0} does not equal "
                        "to {1}".format(vip_array[0], ip))
                vip_pcs = remote.execute(
                    'pcs resource show {0}{1}'.format(
                        'vip__', vip))['exit_code']
                asserts.assert_not_equal(0, vip_pcs,
                                         'The vip__{0} was found in '
                                         'pacemaker'.format(vip))
                vip_ns = remote.execute(
                    'ip netns exec {0} ip a | grep {1}{2}'.format(
                        namespace, 'b_', vip))['exit_code']
                asserts.assert_not_equal(0, vip_ns,
                                         'The {0} was found in {1} '
                                         'namespace'.format(vip, namespace))
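
# Illustrative sketch (not from the original test): for plugin-reserved VIPs
# that must stay out of pacemaker and out of the haproxy namespace, the checks
# above are negative ones -- the lookup commands are expected to fail.
def assert_cmd_fails(remote, cmd, msg):
    """Assert that a remote command exits with a non-zero status."""
    assert remote.execute(cmd)['exit_code'] != 0, msg

# usage:
# assert_cmd_fails(remote, 'pcs resource show vip__reserved_pub',
#                  'The vip__reserved_pub was found in pacemaker')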
Exemplo n.º 40
    def deploy_lma_toolchain(self):
        """Deploy cluster in HA mode with the LMA toolchain

        This also deploys the Elasticsearch-Kibana plugin and the
        InfluxDB-Grafana plugin since they work together with the LMA collector
        plugin.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute + cinder role
            6. Add 1 node with influxdb_grafana + elasticsearch_kibana +
               infrastructure_alerting roles
            7. Deploy the cluster
            8. Check that the plugins work
            9. Run OSTF

        Duration 150m
        Snapshot deploy_lma_toolchain

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # TODO(scroiset): use actions fuel_actions.py
        # upload_plugin and install_plugin
        # copy plugins to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.LMA_COLLECTOR_PLUGIN_PATH,
            tar_target="/var")
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH,
            tar_target="/var")
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.INFLUXDB_GRAFANA_PLUGIN_PATH,
            tar_target="/var")
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH,
            tar_target="/var")

        # install plugins
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_COLLECTOR_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.INFLUXDB_GRAFANA_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        influxdb_user = "******"
        influxdb_pass = "******"
        influxdb_rootpass = "******"
        grafana_user = "******"
        grafana_pass = "******"
        mysql_dbname = "grafanalma"
        mysql_user = "******"
        mysql_pass = "******"
        nagios_pass = "******"
        plugins = [
            {
                'name': 'lma_collector',
                'version': '0.9.0',
                'options': {
                    'environment_label/value': 'deploy_lma_toolchain',
                    'elasticsearch_mode/value': 'local',
                    'influxdb_mode/value': 'local',
                    'alerting_mode/value': 'local',
                }
            },
            {
                'name': 'elasticsearch_kibana',
                'version': '0.9.0',
                'options': {
                }
            },
            {
                'name': 'lma_infrastructure_alerting',
                'version': '0.9.0',
                'options': {
                    'send_to/value': 'root@localhost',
                    'send_from/value': 'nagios@localhost',
                    'smtp_host/value': '127.0.0.1',
                    'nagios_password/value': nagios_pass,
                }
            },
            {
                'name': 'influxdb_grafana',
                'version': '0.9.0',
                'options': {
                    'influxdb_rootpass/value': influxdb_rootpass,
                    'influxdb_username/value': influxdb_user,
                    'influxdb_userpass/value': influxdb_pass,
                    'grafana_username/value': grafana_user,
                    'grafana_userpass/value': grafana_pass,
                    'mysql_mode/value': 'local',
                    'mysql_dbname/value': mysql_dbname,
                    'mysql_username/value': mysql_user,
                    'mysql_password/value': mysql_pass,
                }
            },
        ]
        for plugin in plugins:
            plugin_name = plugin['name']
            plugin_version = plugin['version']
            msg = "Plugin '{:s}' couldn't be found. " \
                  "Test aborted".format(plugin_name)
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('{:s} plugin is installed'.format(plugin_name))
            self.fuel_web.update_plugin_settings(
                cluster_id, plugin_name,
                plugin_version, plugin['options'])

        analytics_roles = ["influxdb_grafana",
                           "elasticsearch_kibana",
                           "infrastructure_alerting"]
        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute", "cinder"],
                "slave-05": analytics_roles,
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=9000)

        analytics_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, analytics_roles
        )
        msg = "One node with '{}' roles must be present, found {}".format(
            ' + '.join(analytics_roles), len(analytics_nodes))

        assert_true(len(analytics_nodes) == 1, msg)

        elasticsearch_kibana_vip = self.get_vip(cluster_id, 'es_vip_mgmt')
        influxdb_grafana_vip = self.get_vip(cluster_id, 'influxdb')
        nagios_vip = self.get_vip(cluster_id, 'infrastructure_alerting')
        assert_is_not_none(
            elasticsearch_kibana_vip,
            "Fail to retrieve the Elasticsearch/Kibana cluster VIP address"
        )
        assert_is_not_none(
            influxdb_grafana_vip,
            "Fail to retrieve the InfluxDB/Grafana cluster VIP address"
        )
        assert_is_not_none(
            nagios_vip,
            "Fail to retrieve the Infrastructure Alerting cluster VIP address"
        )

        def assert_http_get_response(url, expected=200):
            r = requests.get(url)
            assert_equal(r.status_code, expected,
                         "{} responded with {}, expected {}".format(
                             url, r.status_code, expected))

        logger.debug("Check that Elasticsearch is ready")
        assert_http_get_response("http://{0}:9200/".format(
            elasticsearch_kibana_vip))

        logger.debug("Check that Kibana is ready")
        assert_http_get_response("http://{0}/".format(
            elasticsearch_kibana_vip))

        logger.debug("Check that the root user can access InfluxDB")
        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,
                                                     'root',
                                                     influxdb_rootpass))
        logger.debug("Check that the LMA user can access InfluxDB")
        assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,
                                                     influxdb_user,
                                                     influxdb_pass))

        logger.debug("Check that the LMA user can access Grafana")
        assert_http_get_response(
            "http://{0}:{1}@{2}:8000/api/org".format(grafana_user,
                                                     grafana_pass,
                                                     influxdb_grafana_vip))

        nagios_url = "http://{}:{}".format(nagios_vip, '8001')
        r = requests.get(nagios_url, auth=('nagiosadmin',
                                           nagios_pass))
        assert_equal(
            r.status_code, 200,
            "Nagios HTTP response code {}, expected {}".format(
                r.status_code, 200)
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_lma_toolchain")
Exemplo n.º 41
    def separate_haproxy(self):
        """Deploy HA environment with separate Haproxy.

        Scenario:
            1. Revert snapshot with ready master node
            2. Copy and install external-lb and detach-haproxy plugins
            3. Bootstrap 3 slaves from default nodegroup
            4. Create cluster with Neutron VXLAN and custom nodegroups
            5. Run 'fuel-mirror' to replace cluster repositories
               with local mirrors
            6. Bootstrap 2 slaves nodes from second nodegroup
               and one node from third node group
            7. Enable plugins for cluster
            8. Add 2 controllers from default nodegroup and 1 controller
               from second node group
            9. Add 1 compute+cinder from default node group
               and 1 compute+cinder from second node group
            10. Add haproxy node from third node group
            11. Verify networks
            12. Deploy cluster

        Duration 120m
        Snapshot separate_haproxy
        """

        if not MULTIPLE_NETWORKS:
            raise exceptions.FuelQAVariableNotSet('MULTIPLE_NETWORKS', 'true')

        self.show_step(1)
        self.env.revert_snapshot('ready')

        self.show_step(2)
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH,
                             tar_target="/var")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=SEPARATE_SERVICE_BALANCER_PLUGIN_PATH,
                             tar_target="/var")

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH))

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(SEPARATE_SERVICE_BALANCER_PLUGIN_PATH))

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
        self.show_step(4)
        admin_ip = self.ssh_manager.admin_ip
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'net_provider': NEUTRON,
                'net_segment_type': NEUTRON_SEGMENT['tun'],
                'tenant': 'separatehaproxy',
                'user': '******',
                'password': '******',
                'ntp_list': [admin_ip],
            })
        self.show_step(5)
        if MIRROR_UBUNTU != '':
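            # MIRROR_UBUNTU is expected to be a 'deb <url> <suite>
            # <components>' repository line; take the URL token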
            ubuntu_url = MIRROR_UBUNTU.split()[1]
            replace_cmd = \
                "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \
                " /usr/share/fuel-mirror/ubuntu.yaml".format(
                    ubuntu_url)
            self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=replace_cmd)
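        # build a local Ubuntu/MOS mirror on the master node and
        # switch the cluster repositories to it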
        create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_mirror_cmd)
        apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \
                           '--env {0} --replace'.format(cluster_id)
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_mirror_cmd)

        self.show_step(6)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7])

        self.show_step(7)
        plugin_name = 'detach_haproxy'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        plugin_name = 'external_loadbalancer'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.show_step(8)
        self.show_step(9)
        self.show_step(10)
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        nodegroup3 = NODEGROUPS[2]['name']

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': [['controller'], nodegroup1],
                'slave-02': [['controller'], nodegroup1],
                'slave-04': [['compute', 'cinder'], nodegroup2],
                'slave-05': [['controller'], nodegroup2],
                'slave-03': [['compute', 'cinder'], nodegroup1],
                'slave-07': [['standalone-haproxy'], nodegroup3]
            })

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        self.fuel_web.deploy_cluster_wait(cluster_id,
                                          timeout=180 * 60,
                                          check_services=False)

        self.env.make_snapshot('separate_haproxy')
Example No. 42
    def separate_haproxy(self):
        """Deploy HA environment with separate Haproxy.

        Scenario:
            1. Revert snapshot with ready master node
            2. Copy and install external-lb and detach-haproxy plugins
            3. Bootstrap 3 slaves from default nodegroup
            4. Create cluster with Neutron VXLAN and custom nodegroups
            5. Run 'fuel-mirror' to replace cluster repositories
               with local mirrors
            6. Bootstrap 2 slaves nodes from second nodegroup
               and one node from third node group
            7. Enable plugins for cluster
            8. Add 2 controllers from default nodegroup and 1 controller
               from second node group
            9. Add 1 compute+cinder from default node group
               and 1 compute+cinder from second node group
            10. Add haproxy node from third node group
            11. Verify networks
            12. Deploy cluster

        Duration 120m
        Snapshot separate_haproxy
        """

        if not MULTIPLE_NETWORKS:
            raise exceptions.FuelQAVariableNotSet(
                'MULTIPLE_NETWORKS', 'true')

        self.show_step(1)
        self.env.revert_snapshot('ready')

        self.show_step(2)
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH,
            tar_target="/var")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=SEPARATE_SERVICE_BALANCER_PLUGIN_PATH,
            tar_target="/var")

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH))

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                SEPARATE_SERVICE_BALANCER_PLUGIN_PATH))

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
        self.show_step(4)
        admin_ip = self.ssh_manager.admin_ip
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'net_provider': NEUTRON,
                'net_segment_type': NEUTRON_SEGMENT['tun'],
                'tenant': 'separatehaproxy',
                'user': '******',
                'password': '******',
                'ntp_list': [admin_ip],
            }
        )
        self.show_step(5)
        if MIRROR_UBUNTU != '':
            ubuntu_url = MIRROR_UBUNTU.split()[1]
            replace_cmd = \
                "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \
                " /usr/share/fuel-mirror/ubuntu.yaml".format(
                    ubuntu_url)
            self.ssh_manager.execute_on_remote(ip=admin_ip,
                                               cmd=replace_cmd)
        create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_mirror_cmd)
        apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \
                           '--env {0} --replace'.format(cluster_id)
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_mirror_cmd)

        self.show_step(6)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7])

        self.show_step(7)
        plugin_name = 'detach_haproxy'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        plugin_name = 'external_loadbalancer'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.show_step(8)
        self.show_step(9)
        self.show_step(10)
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        nodegroup3 = NODEGROUPS[2]['name']

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': [['controller'], nodegroup1],
                'slave-02': [['controller'], nodegroup1],
                'slave-04': [['compute', 'cinder'], nodegroup2],
                'slave-05': [['controller'], nodegroup2],
                'slave-03': [['compute', 'cinder'], nodegroup1],
                'slave-07': [['standalone-haproxy'], nodegroup3]
            }
        )

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60,
                                          check_services=False)

        self.env.make_snapshot('separate_haproxy')
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_V3_PATH',
            plugin_path=EXAMPLE_PLUGIN_V3_PATH
        )

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=EXAMPLE_PLUGIN_V3_PATH,
            tar_target='/var'
        )
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True}
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that fuel_plugin_example_v3_puppet is called
        # between netconfig and connectivity_tests
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str,
                          plugin_str,
                          connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that the service is running on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd
        )
        process_count = len(res_pgrep['stdout'])
        assert_equal(1, process_count,
                     "There should be 1 process 'fuel-simple-service',"
                     " but {0} found {1} processes".format(cmd, process_count))

        # curl to service
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd_curl
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
Example No. 44
    def deploy_cluster_with_reboot_plugin_timeout(self):
        """Check deployment is failed by reboot task plugin.

        Scenario:
            1. Revert snapshot with 3 nodes
            2. Download and install fuel-plugin-builder
            3. Create plugin with reboot task,
               set timeout for reboot task as 1 second
            4. Build plugin
            5. Install plugin to fuel
            6. Create cluster and enable plugin
            7. Provision nodes
            8. Deploy cluster
            9. Check deployment was failed by reboot task
            10. Check error msg at the logs

        Duration 15m
        """
        # define some plugin related variables
        plugin_name = 'timeout_plugin'
        source_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        tasks_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'reboot_tasks.yaml'
        # start reverting snapshot
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")
        # let's get ssh client for the master node
        self.show_step(2)
        # initiate fuel plugin builder instance
        fpb = FuelPluginBuilder()
        # install fuel_plugin_builder on master node
        fpb.fpb_install()
        # set the reboot task timeout to 1 second
        fpb.put_value_to_local_yaml(os.path.join(tasks_path, tasks_file),
                                    os.path.join('/tmp/', tasks_file),
                                    [1, 'parameters', 'timeout'],
                                    1)
        self.show_step(3)
        # create plugin template on the master node
        fpb.fpb_create_plugin(source_plugin_path)
        # replace plugin tasks with our file
        fpb.fpb_replace_plugin_content(
            os.path.join('/tmp/', tasks_file),
            os.path.join(source_plugin_path, 'tasks.yaml'))
        # build plugin
        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)
        # copy plugin archive file
        # to the /var directory on the master node
        fpb.fpb_copy_plugin(
            os.path.join(source_plugin_path, packet_name),
            plugin_path)
        # let's install plugin
        self.show_step(5)
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.join(plugin_path, packet_name))
        # create cluster
        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable it
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller', 'ceph-osd']}
        )
        self.show_step(7)
        self.fuel_web.provisioning_cluster_wait(cluster_id)
        logger.info('Start cluster #%s deployment', cluster_id)
        self.show_step(8)
        task = self.fuel_web.client.deploy_nodes(cluster_id)
        self.show_step(9)
        self.fuel_web.assert_task_failed(task)

        msg = 'Time detection (1 sec) for node reboot has expired'
        cmd = 'grep "{0}" /var/log/astute/astute.log'.format(msg)
        self.show_step(10)
        with self.env.d_env.get_admin_remote() as admin_remote:
            result = admin_remote.execute(cmd)['stdout'][0]

        asserts.assert_true(
            msg in result,
            'Failed to find reboot plugin warning message in logs'
        )
    def deploy_neutron_example_ha_add_node(self):
        """Deploy and scale cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 nodes with compute role
            6. Add 1 nodes with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Add 2 nodes with controller role
            11. Deploy cluster
            12. Check plugin health
            13. Run OSTF

        Duration 150m
        Snapshot deploy_neutron_example_ha_add_node

        """
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_PATH',
            plugin_path=EXAMPLE_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=EXAMPLE_PLUGIN_PATH,
            tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['tun'],
                "propagate_task_deploy": True
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that the service is running on the controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-04', 'slave-05'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'

            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))

        # add verification here
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha_add_node")
Example No. 46
    def separate_rabbit_service(self):
        """Deploy cluster with 3 separate rabbit roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with rabbit role
            4. Add 1 compute and cinder
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_rabbit_service
        """
        self.check_run("separate_rabbit_service")
        checkers.check_plugin_path_env(
            var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH))

        data = {
            'tenant': 'separaterabbit',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-rabbitmq'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-rabbitmq'],
                'slave-05': ['standalone-rabbitmq'],
                'slave-06': ['standalone-rabbitmq'],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_rabbit_service", is_make=True)
    def deploy_murano_with_glare_ha_one_controller(self):
        """Deploy cluster in ha mode with murano plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Add 1 node with murano role
            8. Deploy the cluster
            9. Run network verification
            10. Run sanity OSTF
            11. Run Murano Platform OSTF

        Duration 150m
        Snapshot deploy_murano_with_glare_ha_one_controller
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.MURANO_PLUGIN_PATH,
            tar_target="/var")
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.MURANO_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            configure_ssl=False
        )

        self.setup_murano_plugin(cluster_id, glare=True)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["compute"],
                "slave-03": ["cinder"],
                "slave-04": ["murano-node"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity'])

        logger.debug('Run OSTF platform tests')

        test_class_main = ('fuel_health.tests.tests_platform'
                           '.test_murano_linux.MuranoDeployLinuxServicesTests')
        tests_names = ['test_deploy_dummy_app_with_glare']

        test_classes = []

        for test_name in tests_names:
            test_classes.append('{0}.{1}'.format(test_class_main,
                                                 test_name))

        for test_name in test_classes:
            self.fuel_web.run_single_ostf_test(
                cluster_id=cluster_id, test_sets=['tests_platform'],
                test_name=test_name, timeout=60 * 20)

        self.env.make_snapshot("deploy_murano_with_glare_ha_one_controller")
Example No. 48
    def deploy_murano_with_glare_ha_one_controller(self):
        """Deploy cluster in ha mode with murano plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Add 1 node with murano role
            8. Deploy the cluster
            9. Run network verification
            10. Run sanity OSTF
            11. Run Murano Platform OSTF

        Duration 150m
        Snapshot deploy_murano_with_glare_ha_one_controller
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.MURANO_PLUGIN_PATH,
                             tar_target="/var")
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.MURANO_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            configure_ssl=False)

        self.setup_murano_plugin(cluster_id, glare=True)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["compute"],
                "slave-03": ["cinder"],
                "slave-04": ["murano-node"]
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity'])

        logger.debug('Run OSTF platform tests')

        test_class_main = ('fuel_health.tests.tests_platform'
                           '.test_murano_linux.MuranoDeployLinuxServicesTests')
        tests_names = ['test_deploy_dummy_app_with_glare']

        test_classes = []

        for test_name in tests_names:
            test_classes.append('{0}.{1}'.format(test_class_main, test_name))

        for test_name in test_classes:
            self.fuel_web.run_single_ostf_test(cluster_id=cluster_id,
                                               test_sets=['tests_platform'],
                                               test_name=test_name,
                                               timeout=60 * 20)

        self.env.make_snapshot("deploy_murano_with_glare_ha_one_controller")
Example No. 49
    def vip_reservation_for_plugin(self):
        """Check vip reservation for fuel plugin

        Scenario:
        1. Revert snapshot with 3 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with predefined network_roles.yaml
        4. Build and copy plugin to /var directory
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check vip reservation

        Duration 40m
        """
        plugin_name = 'vip_reservation_plugin'
        source_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        dir_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'tasks.yaml'
        net_role_file = 'network_roles.yaml'
        metadata_file = 'metadata.yaml'
        namespace = 'haproxy'

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")
        # initiate fuel plugin builder instance
        fpb = FuelPluginBuilder()
        # install fuel_plugin_builder on master node
        self.show_step(2)
        fpb.fpb_install()
        # create plugin template on the master node
        self.show_step(3)
        fpb.fpb_create_plugin(source_plugin_path)
        # replace plugin tasks, metadata, network_roles
        fpb.fpb_replace_plugin_content(
            os.path.join(dir_path, net_role_file),
            os.path.join(source_plugin_path, net_role_file))
        fpb.fpb_replace_plugin_content(
            os.path.join(dir_path, tasks_file),
            os.path.join(source_plugin_path, tasks_file))
        fpb.fpb_replace_plugin_content(
            os.path.join(dir_path, metadata_file),
            os.path.join(source_plugin_path, metadata_file))
        # build plugin
        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)
        # copy plugin archive file from nailgun container
        # to the /var directory on the master node
        fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name),
                            plugin_path)
        # let's install plugin
        self.show_step(5)
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.join(
                                            plugin_path, packet_name))
        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller'],
            'slave-02': ['compute']
        })
        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
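            # ruby one-liner that dumps the node's 'network_metadata'
            # hiera key as JSON, so the reserved VIPs can be read below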
            hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \
                             "Hiera.logger = 'noop'; puts JSON.dump " \
                             "(h.lookup('network_metadata', " \
                             "[], {}, nil, nil))\""
            for vip in ('reserved_pub', 'reserved_mng'):
                # get vips from hiera
                vip_hiera = json.loads(
                    remote.execute(hiera_json_out)['stdout']
                    [0])["vips"][vip]["ipaddr"]
                # get vips from database
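                # (the escaped quote sequences keep literal single quotes
                # around the vip_name value through the intermediate
                # shell layer)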
                vip_db = self.env.postgres_actions.run_query(
                    db='nailgun',
                    query="select ip_addr from ip_addrs where "
                    "vip_name = '\"'\"'{0}'\"'\"';".format(vip))
                vip_array = [vip_hiera, vip_db]
                for ip in vip_array[1:]:
                    asserts.assert_equal(
                        vip_array[0], ip,
                        "Vip from hiera output {0} does not equal "
                        "to {1}".format(vip_array[0], ip))
                vip_pcs = remote.execute('pcs resource show {0}{1}'.format(
                    'vip__', vip))['exit_code']
                asserts.assert_not_equal(
                    0, vip_pcs, 'The vip__{0} was found in '
                    'pacemaker'.format(vip))
                vip_ns = remote.execute(
                    'ip netns exec {0} ip a | grep {1}{2}'.format(
                        namespace, 'b_', vip))['exit_code']
                asserts.assert_not_equal(
                    0, vip_ns, 'The {0} was found in {1} '
                    'namespace'.format(vip, namespace))
Example No. 50
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = NEUTRON_SEGMENT['vlan']
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
                "propagate_task_deploy": True
            })

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check that the service is running on the controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Example No. 51
    def deploy_neutron_lbaas_simple_reset_ready(self):
        """Deploy and re-deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Reset cluster
            11. Add 1 compute
            12. Re-deploy cluster
            13. Check health of lbaas agent on the node
            14. Create pool and vip
            15. Run OSTF

        Duration 65m
        Snapshot deploy_neutron_lbaas_simple_reset_ready

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=LBAAS_PLUGIN_PATH,
            tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
        )

        plugin_name = 'lbaas'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        logger.debug('we have lbaas element')
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster['net_provider']), 'neutron')

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(controller['ip'])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)

        self.fuel_web.stop_reset_env_wait(cluster_id)

        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:2])

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-03': ['compute'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_lbaas_simple_reset_ready")
Example No. 52
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.EMC_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.EMC_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in [
                "emc_sp_a_ip", "emc_sp_b_ip", "emc_username", "emc_password",
                "emc_pool_name"
        ]:
            asserts.assert_true(
                option in attr["editable"]["emc_vnx"],
                "{0} is not in cluster attributes: {1}".format(
                    option, str(attr["editable"]["emc_vnx"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = settings.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = settings.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = settings.EMC_USERNAME
        emc_options["emc_password"]["value"] = settings.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = settings.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-01', 'slave-02', 'slave-03']
        ]
        compute_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-04', 'slave-05']
        ]

        controller_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in controller_nodes
        ]
        compute_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in compute_nodes
        ]

        # check cinder-volume settings

        for node in controller_nodes:
            self.check_emc_cinder_config(ip=node['ip'],
                                         path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(ip=node['ip'])

        # check cinder-volume layout on controllers

        cinder_volume_ctrls = [
            self.check_service(controller, "cinder-volume")
            for controller in controller_remotes
        ]
        asserts.assert_equal(
            sum(cinder_volume_ctrls), 1, "Cluster has more than one "
            "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [
            self.check_service(compute, "cinder-volume")
            for compute in compute_remotes
        ]
        # closing connections
        for remote in controller_remotes:
            remote.clear()
        for remote in compute_remotes:
            remote.clear()

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
Example No. 53
    def etckeeper_plugin(self):
        """Check tracking /etc dir by etckeeper plugin

        Scenario:
        1. Revert snapshot with 1 node
        2. Download and install fuel-plugin-builder
        3. Clone plugin repo
        4. Build plugin
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check plugin

        Duration 50m
        """
        plugin_name = 'fuel-plugin-etckeeper'
        plugin_path = '/var'
        source_plugin_path = os.path.join(plugin_path, plugin_name)

        self.show_step(1)
        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(2)
        fpb = FuelPluginBuilder()
        fpb.fpb_install()

        ip = self.ssh_manager.admin_ip

        self.show_step(3)
        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='git clone {0} {1}'.format(
                ETCKEEPER_PLUGIN_REPO, source_plugin_path))

        self.show_step(4)
        packet_name = fpb.fpb_build_plugin(source_plugin_path)

        self.show_step(5)
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.join(source_plugin_path, packet_name))

        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={'propagate_task_deploy': True}
        )

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)
        logger.info('Cluster is {!s}'.format(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller']}
        )

        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
        etckeeper_status = self.ssh_manager.execute_on_remote(
            ip=ip, cmd='etckeeper vcs status')
        if 'branch master' not in etckeeper_status['stdout_str']:
            raise Exception("etckeeper reports an unexpected status: "
                            "{0}".format(etckeeper_status['stdout_str']))

        new_config = 'test_config'
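        # touch a new file in /etc; etckeeper status should then
        # report it as an uncommitted change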
        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='>>{0}'.format(os.path.join('/etc', new_config)))

        etckeeper_status = self.ssh_manager.execute_on_remote(
            ip=ip, cmd='etckeeper vcs status')
        if new_config not in etckeeper_status['stdout_str']:
            raise Exception(
                "etckeeper did not track the new config file {0}, "
                "actual status: {1}".format(
                    new_config, etckeeper_status['stdout_str']))