Example #1
def prepare_contrail_plugin(obj, slaves=None, options=None):
    """Copy necessary packages to the master node and install them"""

    obj.env.revert_snapshot("ready_with_%d_slaves" % slaves)

    # copy plugin to the master node
    checkers.upload_tarball(
        obj.env.d_env.get_admin_remote(),
        CONTRAIL_PLUGIN_PATH, '/var')

    # install plugin
    checkers.install_plugin_check_code(
        obj.env.d_env.get_admin_remote(),
        plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))
    # FIXME: when opencontrail v3.0 is available
    # FIXME: remove the following line from 'or True'
    if obj.CONTRAIL_DISTRIBUTION == 'juniper' or True:
        # copy additional packages to the master node
        upload_contrail_packages(obj)

        # install packages
        install_packages(obj, obj.env.d_env.get_admin_remote())

    # prepare fuel
    openstack.assign_net_provider(obj, **(options or {}))
Example #2
    def _bootstrap(self):

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            checkers.upload_tarball(remote,
                                    conf.LMA_INFRA_ALERTING_PLUGIN_PATH,
                                    "/var")

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        plugin_options = {
            'send_to/value': 'root@localhost',
            'send_from/value': 'nagios@localhost',
            'smtp_host/value': '127.0.0.1',
            'nagios_password/value': self._nagios_password,
        }

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)
        logger.debug('%s (%s) plugin is installed' %
                     (self._name, self._version))
        self.fuel_web.update_plugin_settings(cluster_id, self._name,
                                             self._version, plugin_options)

        return cluster_id
Example #3
    def _prepare_contrail_plugin(self, slaves=None, pub_net=False):
        """Copy necessary packages to the master node and install them"""

        self.env.revert_snapshot("ready_with_%d_slaves" % slaves)

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                CONTRAIL_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))

            # copy additional packages to the master node
            self._upload_contrail_packages(remote)

            # install packages
            self._install_packages(remote)

        # prepare fuel
        self._assign_net_provider(pub_net)
Ejemplo n.º 4
0
    def install_plugin(self):
        """Install plugin to Fuel"""
        assert_true(self.plugin_path, "plugin_path is not specified")

        with self.env.d_env.get_admin_remote() as remote:
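            # install the plugin and verify the command's exit code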
            checkers.install_plugin_check_code(remote,
                                               plugin=os.path.basename(
                                                   self.plugin_path))
Ejemplo n.º 5
0
def install_dvs_plugin(master_node):
    """Upload and install the DVS plugin on the master node."""
    # copy plugins to the master node
    checkers.upload_tarball(master_node, DVS_PLUGIN_PATH, "/var")

    # install plugin
    checkers.install_plugin_check_code(
        master_node, plugin=os.path.basename(DVS_PLUGIN_PATH))
Example #6
    def install_plugin(self):
        """Install plugin to Fuel"""
        assert_true(self.plugin_path, "plugin_path is not specified")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(self.plugin_path))
Example #7
    def deploy_neutron_lbaas_simple(self):
        """Deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Run OSTF

        Duration 35m
        Snapshot deploy_neutron_vlan_lbaas_simple

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(remote, LBAAS_PLUGIN_PATH, "/var")

            # install plugin

            checkers.install_plugin_check_code(remote, plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__, mode=DEPLOYMENT_MODE_SIMPLE)

        plugin_name = "lbaas"
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        logger.debug("we have lbaas element")
        options = {"metadata/enabled": True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {"slave-01": ["controller"], "slave-02": ["compute"], "slave-03": ["compute"]}
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster["net_provider"]), "neutron")

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name("slave-01")
        os_conn = os_actions.OpenStackActions(controller["ip"])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple")
Example #8
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
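            # upload the Zabbix plugin to the master node and install it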
            checkers.upload_tarball(
                remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ZABBIX_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ha")
Example #9
def install_dvs_plugin(master_node):
    """Upload and install the DVS plugin on the master node."""
    # copy plugins to the master node
    checkers.upload_tarball(
        master_node,
        DVS_PLUGIN_PATH, "/var")

    # install plugin
    checkers.install_plugin_check_code(
        master_node,
        plugin=os.path.basename(DVS_PLUGIN_PATH))
Example #10
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(remote,
                                               plugin=os.path.basename(
                                                   conf.ZABBIX_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ha")
Example #11
def prepare_test_plugin(
        obj, slaves=None, pub_all_nodes=False, ceph_value=False):
    """Copy necessary packages to the master node and install them"""

    obj.env.revert_snapshot("ready_with_%d_slaves" % slaves)

    # copy plugin to the master node
    checkers.upload_tarball(
        obj.env.d_env.get_admin_remote(),
        PLUGIN_PATH, '/var')

    # install plugin
    checkers.install_plugin_check_code(
        obj.env.d_env.get_admin_remote(),
        plugin=os.path.basename(PLUGIN_PATH))

    # prepare fuel
    openstack.assign_net_provider(obj, pub_all_nodes, ceph_value)
Example #12
    def _prepare_contrail_plugin(self, slaves=None, pub_net=False):
        """Copy necessary packages to the master node and install them"""

        self.env.revert_snapshot("ready_with_%d_slaves" % slaves)

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            checkers.upload_tarball(remote, CONTRAIL_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote, plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))

            # copy additional packages to the master node
            self._upload_contrail_packages(remote)

            # install packages
            self._install_packages(remote)

        # prepare fuel
        self._assign_net_provider(pub_net)
Example #13
    def _bootstrap(self):

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                conf.LMA_INFRA_ALERTING_PLUGIN_PATH, "/var")

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        plugin_options = {
            'send_to/value': 'root@localhost',
            'send_from/value': 'nagios@localhost',
            'smtp_host/value': '127.0.0.1',
            'nagios_password/value': self._nagios_password,
        }

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)
        logger.debug('%s (%s) plugin is installed' % (self._name,
                                                      self._version))
        self.fuel_web.update_plugin_settings(cluster_id,
                                             self._name,
                                             self._version,
                                             plugin_options)

        return cluster_id
Example #14
    def deploy_neutron_example_ha_add_node(self):
        """Deploy and scale cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Add 2 nodes with controller role
            11. Deploy cluster
            12. Check plugin health
            13. Run OSTF

        Duration 150m
        Snapshot deploy_neutron_example_ha_add_node

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, EXAMPLE_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['tun'],
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-04', 'slave-05'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'

            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))

        # add verification here
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha_add_node")
Example #15
    def deploy_lma_toolchain(self):
        """Deploy cluster in HA mode with the LMA toolchain

        This also deploys the Elasticsearch-Kibana plugin and the
        InfluxDB-Grafana plugin since they work together with the LMA collector
        plugin.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute + cinder role
            6. Add 1 node with influxdb_grafana + elasticsearch_kibana +
               infrastructure_alerting roles
            7. Deploy the cluster
            8. Check that the plugins work
            9. Run OSTF

        Duration 150m
        Snapshot deploy_lma_toolchain

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # TODO(scroiset): use actions fuel_actions.py
        # upload_plugin and install_plugin
        with self.env.d_env.get_admin_remote() as remote:
            # copy plugins to the master node
            checkers.upload_tarball(
                remote,
                conf.LMA_COLLECTOR_PLUGIN_PATH, "/var")
            checkers.upload_tarball(
                remote,
                conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH, "/var")
            checkers.upload_tarball(
                remote,
                conf.INFLUXDB_GRAFANA_PLUGIN_PATH, "/var")
            checkers.upload_tarball(
                remote,
                conf.LMA_INFRA_ALERTING_PLUGIN_PATH, "/var")

            # install plugins
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_COLLECTOR_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.INFLUXDB_GRAFANA_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        influxdb_user = "******"
        influxdb_pass = "******"
        influxdb_rootpass = "******"
        grafana_user = "******"
        grafana_pass = "******"
        mysql_dbname = "grafanalma"
        mysql_user = "******"
        mysql_pass = "******"
        nagios_pass = "******"
        plugins = [
            {
                'name': 'lma_collector',
                'version': '0.9.0',
                'options': {
                    'environment_label/value': 'deploy_lma_toolchain',
                    'elasticsearch_mode/value': 'local',
                    'influxdb_mode/value': 'local',
                    'alerting_mode/value': 'local',
                }
            },
            {
                'name': 'elasticsearch_kibana',
                'version': '0.9.0',
                'options': {
                }
            },
            {
                'name': 'lma_infrastructure_alerting',
                'version': '0.9.0',
                'options': {
                    'send_to/value': 'root@localhost',
                    'send_from/value': 'nagios@localhost',
                    'smtp_host/value': '127.0.0.1',
                    'nagios_password/value': nagios_pass,
                }
            },
            {
                'name': 'influxdb_grafana',
                'version': '0.9.0',
                'options': {
                    'influxdb_rootpass/value': influxdb_rootpass,
                    'influxdb_username/value': influxdb_user,
                    'influxdb_userpass/value': influxdb_pass,
                    'grafana_username/value': grafana_user,
                    'grafana_userpass/value': grafana_pass,
                    'mysql_mode/value': 'local',
                    'mysql_dbname/value': mysql_dbname,
                    'mysql_username/value': mysql_user,
                    'mysql_password/value': mysql_pass,
                }
            },
        ]
        for plugin in plugins:
            plugin_name = plugin['name']
            plugin_version = plugin['version']
            msg = "Plugin '%s' couldn't be found. Test aborted" % plugin_name
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('%s plugin is installed' % plugin_name)
            self.fuel_web.update_plugin_settings(
                cluster_id, plugin_name,
                plugin_version, plugin['options'])

        analytics_roles = ["influxdb_grafana",
                           "elasticsearch_kibana",
                           "infrastructure_alerting"]
        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute", "cinder"],
                "slave-05": analytics_roles,
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=9000)

        analytics_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, analytics_roles
        )
        msg = "One node with '{}' roles must be present, found {}".format(
            ' + '.join(analytics_roles), len(analytics_nodes))

        assert_true(len(analytics_nodes) == 1, msg)

        elasticsearch_kibana_vip = self.get_vip(cluster_id, 'es_vip_mgmt')
        influxdb_grafana_vip = self.get_vip(cluster_id, 'influxdb')
        nagios_vip = self.get_vip(cluster_id, 'infrastructure_alerting')
        assert_is_not_none(
            elasticsearch_kibana_vip,
            "Failed to retrieve the Elasticsearch/Kibana cluster VIP address"
        )
        assert_is_not_none(
            influxdb_grafana_vip,
            "Failed to retrieve the InfluxDB/Grafana cluster VIP address"
        )
        assert_is_not_none(
            nagios_vip,
            "Failed to retrieve the Infrastructure Alerting cluster VIP address"
        )

        def assert_http_get_response(url, expected=200):
            r = requests.get(url)
            assert_equal(r.status_code, expected,
                         "{} responded with {}, expected {}".format(
                             url, r.status_code, expected))

        logger.debug("Check that Elasticsearch is ready")
        assert_http_get_response("http://{0}:9200/".format(
            elasticsearch_kibana_vip))

        logger.debug("Check that Kibana is ready")
        assert_http_get_response("http://{0}/".format(
            elasticsearch_kibana_vip))

        logger.debug("Check that the root user can access InfluxDB")
        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,
                                                     'root',
                                                     influxdb_rootpass))
        logger.debug("Check that the LMA user can access InfluxDB")
        assert_http_get_response(influxdb_url.format(influxdb_grafana_vip,
                                                     influxdb_user,
                                                     influxdb_pass))

        logger.debug("Check that the LMA user can access Grafana")
        assert_http_get_response(
            "http://{0}:{1}@{2}:8000/api/org".format(grafana_user,
                                                     grafana_pass,
                                                     influxdb_grafana_vip))

        nagios_url = "http://{}:{}".format(nagios_vip, '8001')
        r = requests.get(nagios_url, auth=('nagiosadmin',
                                           nagios_pass))
        assert_equal(
            r.status_code, 200,
            "Nagios HTTP response code {}, expected {}".format(
                r.status_code, 200)
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_lma_toolchain")
Example #16
    def deploy_ha_one_controller_glusterfs_simple(self):
        """Deploy cluster with one controller and glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_glusterfs
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote, GLUSTER_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'external_glusterfs'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True,
                   'endpoint/value': GLUSTER_CLUSTER_ENDPOINT}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-03'):
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                self.check_glusterfs_conf(
                    remote=remote,
                    path='/etc/cinder/glusterfs',
                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
Example #17
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
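            # upload and install the Zabbix and SNMP trap plugins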
            for plugin in [
                    conf.ZABBIX_PLUGIN_PATH, conf.ZABBIX_SNMP_PLUGIN_PATH
            ]:
                checkers.upload_tarball(remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote, plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0)

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
Example #18
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            for plugin in [
                    conf.ZABBIX_PLUGIN_PATH, conf.ZABBIX_SNMP_PLUGIN_PATH,
                    conf.ZABBIX_SNMP_EXTREME_PLUGIN_PATH
            ]:
                checkers.upload_tarball(remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote, plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(zabbix_web, 'extreme',
                                              'Power Supply Failed'))

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
Example #19
    def deploy_cluster_with_reboot_plugin(self):
        """Add pre-deployment reboot task to nailgun via plugin.

        Scenario:
        1. Revert snapshot with 5 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with reboot task
        4. Build plugin and copy it from the nailgun container
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Provision nodes
        8. Collect timestamps from nodes
        9. Deploy cluster
        10. Check if timestamps are changed

        Duration 40m
        """
        # define some plugin related variables
        plugin_name = 'reboot_plugin'
        container_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        tasks_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'reboot_tasks.yaml'
        self.env.revert_snapshot("ready_with_5_slaves")
        # let's get ssh client for the master node

        with self.env.d_env.get_admin_remote() as admin_remote:
            # initiate fuel plugin builder instance
            fpb = FuelPluginBuilder()
            # install fuel_plugin_builder on master node
            fpb.fpb_install()
            # create plugin template on the master node
            fpb.fpb_create_plugin(container_plugin_path)
            # replace plugin tasks with our file
            fpb.fpb_replace_plugin_content(
                os.path.join(tasks_path, tasks_file),
                os.path.join(container_plugin_path, 'tasks.yaml'))
            # build plugin
            packet_name = fpb.fpb_build_plugin(container_plugin_path)
            # copy plugin archive file from nailgun container
            # to the /var directory on the master node
            fpb.fpb_copy_plugin_from_container(
                container_plugin_path,
                packet_name,
                plugin_path)
            # let's install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.join(plugin_path, packet_name))

        # create cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('cluster is %s' % str(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['compute'],
                'slave-04': ['ceph-osd']}
        )
        # firstly, let's provision nodes
        self.fuel_web.provisioning_cluster_wait(cluster_id)
        # after provision is done, collect timestamps from nodes
        old_timestamps = {}

        nodes = {
            'slave-01': True,
            'slave-02': True,
            'slave-03': False,
            'slave-04': True
        }

        for node in nodes:
            logger.debug(
                "Get init object creation time from node {0}".format(node))
            cmd = 'stat --printf=\'%Y\' /proc/1'
            with self.fuel_web.get_ssh_for_node(node) as node_ssh:
                old_timestamps[node] = node_ssh.execute(cmd)['stdout'][0]

        # start deploying nodes
        # here nodes with controller and ceph roles should be rebooted
        self.fuel_web.deploy_cluster_wait_progress(cluster_id, 30)

        # collect new timestamps and check them
        for node in nodes:
            logger.debug(
                "Get init object creation time from node {0}".format(node))
            cmd = 'stat --printf=\'%Y\' /proc/1'
            with self.fuel_web.get_ssh_for_node(node) as node_ssh:
                new_timestamp = node_ssh.execute(cmd)['stdout'][0]
            # compute node without ceph role shouldn't reboot
            if not nodes[node]:
                asserts.assert_equal(
                    new_timestamp, old_timestamps[node],
                    'The new timestamp {0} is not equal to old one {1}, '
                    'but it shouldn\'t for {2} node'
                    .format(new_timestamp, old_timestamps[node], node)
                )
            else:
                # other nodes should be rebooted and have new timestamps
                # greater than old
                asserts.assert_true(
                    new_timestamp > old_timestamps[node],
                    'The new timestamp {0} is not greater than old one {1} '
                    'but it should for node {2}'
                    .format(new_timestamp, old_timestamps[node], node)
                )
Example #20
    def deploy_nova_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_nova_example_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(self.env.get_admin_remote(),
                                EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'fuel_plugin_example' in attr['editable']:
            plugin_data = attr['editable']['fuel_plugin_example']['metadata']
            plugin_data['enabled'] = True

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            res_pgrep = self.env.get_ssh_to_remote_by_name(node).execute(cmd)
            assert_equal(
                0, res_pgrep['exit_code'], 'Failed with error {0} '
                'on node {1}'.format(res_pgrep['stderr'], node))
            assert_equal(
                1, len(res_pgrep['stdout']), 'Failed with error {0} on the '
                'node {1}'.format(res_pgrep['stderr'], node))
            # curl to service
            res_curl = self.env.get_ssh_to_remote_by_name(node).execute(
                cmd_curl)
            assert_equal(
                0, res_curl['exit_code'], 'Failed with error {0} '
                'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_nova_example_ha")
Example #21
    def deploy_glusterfs_simple(self):
        """Deploy cluster in simple mode with glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Snapshot deploy_glusterfs_simple

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.get_admin_remote(), GLUSTER_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        settings = None

        if NEUTRON_ENABLE:
            settings = {
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
            settings=settings
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'external_glusterfs' in attr['editable']:
            plugin_enabled = attr['editable']['external_glusterfs']['metadata']
            plugin_enabled['enabled'] = True
            plugin_data = attr['editable']['external_glusterfs']['endpoint']
            plugin_data['value'] = GLUSTER_CLUSTER_ENDPOINT

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-03'):
            self.check_glusterfs_conf(
                remote=self.env.get_ssh_to_remote_by_name(node),
                path='/etc/cinder/glusterfs',
                gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_glusterfs_simple")
Example #22
    def deploy_glusterfs_ha(self):
        """Deploy cluster in ha mode with glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF
            11. Add 2 cinder + controller nodes
            12. Re-deploy cluster
            13. Check plugin health
            14. Run ostf

        Duration 50m
        Snapshot deploy_glusterfs_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote, GLUSTER_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        settings = None

        if NEUTRON_ENABLE:
            settings = {
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=settings
        )

        plugin_name = 'external_glusterfs'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True,
                   'endpoint/value': GLUSTER_CLUSTER_ENDPOINT}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        _ip = self.fuel_web.get_nailgun_node_by_name("slave-03")['ip']
        with self.env.d_env.get_ssh_to_remote(_ip) as remote:
            self.check_glusterfs_conf(
                remote=remote,
                path='/etc/cinder/glusterfs',
                gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller', 'cinder'],
                'slave-05': ['controller', 'cinder'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-03', 'slave-04', 'slave-05'):
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                self.check_glusterfs_conf(
                    remote=remote,
                    path='/etc/cinder/glusterfs',
                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_glusterfs_ha")
Example #23
    def deploy_zabbix_ceph_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller, ceph-osd roles
            5. Add 2 nodes with compute, ceph-osd roles
            6. Deploy the cluster
            7. Run network verification
            8. Run OSTF
            9. Check zabbix service in pacemaker
            10. Check login to zabbix dashboard

        Duration 180m
        Snapshot deploy_zabbix_ceph_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(remote,
                                               plugin=os.path.basename(
                                                   conf.ZABBIX_PLUGIN_PATH))

        settings = {}
        if conf.NEUTRON_ENABLE:
            settings = {
                "net_provider": "neutron",
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE
            }

        settings.update({
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            'tenant': 'cephHA',
            'user': '******',
            'password': '******',
            'osd_pool_size': "3"
        })
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=conf.DEPLOYMENT_MODE,
                                                  settings=settings)

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
            })

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ceph_ha")
Example #24
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)
        checkers.upload_tarball(self.env.d_env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, "/var")

        # install plugin

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(), plugin=os.path.basename(EXAMPLE_PLUGIN_PATH)
        )

        segment_type = NEUTRON_SEGMENT["vlan"]
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"net_provider": "neutron", "net_segment_type": segment_type},
        )

        plugin_name = "fuel_plugin_example"
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {"metadata/enabled": True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {"slave-01": ["controller"], "slave-02": ["compute"], "slave-03": ["compute"]}
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format("slave-01"))
        cmd_curl = "curl localhost:8234"
        cmd = "pgrep -f fuel-simple-service"

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep["exit_code"], "Failed with error {0}".format(res_pgrep["stderr"]))
            assert_equal(1, len(res_pgrep["stdout"]), "Failed with error {0}".format(res_pgrep["stderr"]))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_pgrep["exit_code"], "Failed with error {0}".format(res_curl["stderr"]))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Example #25
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            for plugin in [conf.ZABBIX_PLUGIN_PATH,
                           conf.ZABBIX_SNMP_PLUGIN_PATH,
                           conf.ZABBIX_SNMP_EXTREME_PLUGIN_PATH]:
                checkers.upload_tarball(
                    remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote,
                    plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(
            zabbix_web, 'extreme', 'Power Supply Failed'))

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
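
The wait() call above polls check_event_message until the trap shows up in Zabbix; if the trap never arrives it can block for a long time. A minimal bounded-wait sketch (a simplified stand-in for the wait helper used here, not its actual implementation):

import time

def wait_for(predicate, timeout=300, interval=10):
    """Poll `predicate` until it returns a truthy value or `timeout`
    seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise AssertionError("Condition not met within {0} seconds".format(timeout))
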
Ejemplo n.º 26
0
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            for plugin in [conf.ZABBIX_PLUGIN_PATH,
                           conf.ZABBIX_SNMP_PLUGIN_PATH]:
                checkers.upload_tarball(
                    remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote,
                    plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        cmd = "crm resource status p_zabbix-server"
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            response = remote.execute(cmd)["stdout"][0]
        assert_true("p_zabbix-server is running" in response,
                    "p_zabbix-server resource wasn't found in pacemaker:\n{0}"
                    .format(response))

        public_vip = self.fuel_web.get_public_vip(cluster_id)

        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        screens_html = bs4.BeautifulSoup(zabbix_web.get_screens())
        screens_links = screens_html.find_all('a')
        assert_true(any('charts.php?graphid=' in link.get('href')
                        for link in screens_links),
                    "Zabbix screen page does not contain graphs:\n{0}".
                    format(screens_links))

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0)

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
Ejemplo n.º 27
0
    def deploy_ha_one_controller_glusterfs_simple(self):
        """Deploy cluster in ha mode with glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 nodes with compute role
            6. Add 1 nodes with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_glusterfs
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(self.env.get_admin_remote(),
                                GLUSTER_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        settings = None

        if NEUTRON_ENABLE:
            settings = {
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE,
                                                  settings=settings)

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'external_glusterfs' in attr['editable']:
            plugin_enabled = attr['editable']['external_glusterfs']['metadata']
            plugin_enabled['enabled'] = True
            plugin_data = attr['editable']['external_glusterfs']['endpoint']
            plugin_data['value'] = GLUSTER_CLUSTER_ENDPOINT

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-03'):
            self.check_glusterfs_conf(
                remote=self.env.get_ssh_to_remote_by_name(node),
                path='/etc/cinder/glusterfs',
                gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
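
check_glusterfs_conf is defined elsewhere in the suite; a plausible minimal equivalent (an assumption about its behaviour, not the actual helper) simply verifies that the Cinder shares file on the node lists the configured endpoint:

def glusterfs_endpoint_configured(remote, gfs_endpoint,
                                  path='/etc/cinder/glusterfs'):
    # Hypothetical stand-in for check_glusterfs_conf: the GlusterFS shares
    # file should mention the configured endpoint.
    result = remote.execute('grep -q "{0}" {1}'.format(gfs_endpoint, path))
    return result['exit_code'] == 0
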
Ejemplo n.º 28
0
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(self.env.get_admin_remote(),
                                EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE,
                                                  settings={
                                                      "net_provider":
                                                      'neutron',
                                                      "net_segment_type":
                                                      segment_type,
                                                  })

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'fuel_plugin_example' in attr['editable']:
            plugin_data = attr['editable']['fuel_plugin_example']['metadata']
            plugin_data['enabled'] = True

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.env.get_ssh_to_remote_by_name('slave-01').execute(cmd)
        assert_equal(0, res_pgrep['exit_code'],
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        assert_equal(1, len(res_pgrep['stdout']),
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        # curl to service
        res_curl = self.env.get_ssh_to_remote_by_name('slave-01').execute(
            cmd_curl)
        assert_equal(0, res_curl['exit_code'],
                     'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Ejemplo n.º 29
0
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ZABBIX_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        cmd = "crm resource status p_zabbix-server"
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            response = remote.execute(cmd)["stdout"][0]
        assert_true("p_zabbix-server is running" in response,
                    "p_zabbix-server resource wasn't found in pacemaker:\n{0}"
                    .format(response))

        public_vip = self.fuel_web.get_public_vip(cluster_id)

        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        screens_html = bs4.BeautifulSoup(zabbix_web.get_screens())
        screens_links = screens_html.find_all('a')
        assert_true(any('charts.php?graphid=' in link.get('href')
                        for link in screens_links),
                    "Zabbix screen page does not contain graphs:\n{0}".
                    format(screens_links))

        self.env.make_snapshot("deploy_zabbix_ha")
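
bs4.BeautifulSoup emits a warning when no parser is named, as in the screens check above; a reworked check with an explicit parser (a minor hardening, not taken from the original test) could look like:

import bs4

def zabbix_screens_have_graphs(screens_markup):
    """Return True if the Zabbix screens page contains at least one
    chart link; same check as above, with an explicit HTML parser."""
    soup = bs4.BeautifulSoup(screens_markup, 'html.parser')
    return any('charts.php?graphid=' in (link.get('href') or '')
               for link in soup.find_all('a'))
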
Ejemplo n.º 30
0
    def vip_reservation_for_plugin(self):
        """Check vip reservation for fuel plugin

        Scenario:
        1. Revert snapshot with 3 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with predefined network_roles.yaml
        4. Build and copy plugin from container nailgun
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check vip reservation

        Duration 40m
        """
        plugin_name = 'vip_reservation_plugin'
        container_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        dir_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'tasks.yaml'
        net_role_file = 'network_roles.yaml'
        metadata_file = 'metadata.yaml'
        namespace = 'haproxy'

        self.env.revert_snapshot("ready_with_3_slaves")
        with self.env.d_env.get_admin_remote() as admin_remote:
            # initiate fuel plugin builder instance
            fpb = FuelPluginBuilder()
            # install fuel_plugin_builder on master node
            fpb.fpb_install()
            # create plugin template on the master node
            fpb.fpb_create_plugin(container_plugin_path)
            # replace plugin tasks, metadata, network_roles
            fpb.fpb_replace_plugin_content(
                os.path.join(dir_path, net_role_file),
                os.path.join(container_plugin_path, net_role_file))
            fpb.fpb_replace_plugin_content(
                os.path.join(dir_path, tasks_file),
                os.path.join(container_plugin_path, tasks_file))
            fpb.fpb_replace_plugin_content(
                os.path.join(dir_path, metadata_file),
                os.path.join(container_plugin_path, metadata_file))
            # build plugin
            packet_name = fpb.fpb_build_plugin(container_plugin_path)
            # copy plugin archive file from nailgun container
            # to the /var directory on the master node
            fpb.fpb_copy_plugin_from_container(
                container_plugin_path,
                packet_name,
                plugin_path)
            # let's install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.join(plugin_path, packet_name))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('cluster is %s' % str(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute']}
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \
                             "Hiera.logger = 'noop'; puts JSON.dump " \
                             "(h.lookup('network_metadata', " \
                             "[], {}, nil, nil))\""
            for vip in ('reserved_pub', 'reserved_mng'):
                # get vips from hiera
                vip_hiera = json.loads(
                    remote.execute(
                        hiera_json_out)['stdout'][0])["vips"][vip]["ipaddr"]
                # get vips from database
                vip_db = self.env.postgres_actions.run_query(
                    db='nailgun',
                    query="select ip_addr from ip_addrs where "
                          "vip_type = '\"'\"'{0}'\"'\"';".format(vip))
                vip_array = [vip_hiera, vip_db]
                for ip in vip_array[1:]:
                    asserts.assert_equal(
                        vip_array[0], ip,
                        "Vip from hiera output {0} does not equal "
                        "to {1}".format(vip_array[0], ip))
                vip_pcs = remote.execute(
                    'pcs resource show {0}{1}'.format(
                        'vip__', vip))['exit_code']
                asserts.assert_not_equal(0, vip_pcs,
                                         'The vip__{0} was found in '
                                         'pacemaker'.format(vip))
                vip_ns = remote.execute(
                    'ip netns exec {0} ip a | grep {1}{2}'.format(
                        namespace, 'b_', vip))['exit_code']
                asserts.assert_not_equal(0, vip_ns,
                                         'The {0} was found in {1} '
                                         'namespace'.format(vip, namespace))
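
The ruby one-liner above dumps the node's network_metadata as JSON, which the test then indexes into. The field names below come from that code; the addresses are purely illustrative:

# Illustrative shape of the parsed hiera output (addresses are made up).
network_metadata = {
    "vips": {
        "reserved_pub": {"ipaddr": "172.16.0.10"},
        "reserved_mng": {"ipaddr": "192.168.0.10"},
    }
}
vip_hiera = network_metadata["vips"]["reserved_pub"]["ipaddr"]
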
Ejemplo n.º 31
0
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                ELASTICSEARCH_KIBANA_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, self._name),
            msg)

        self.fuel_web.update_plugin_settings(cluster_id, self._name,
                                             self._version, {})

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': [self._role_name]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server_ip = self.get_vip(cluster_id)
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticsearch is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
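
The two requests.get checks above repeat the same pattern; the LMA collector example further down wraps it in an assert_http_get_response helper. A standalone version of that helper might look like:

import requests
from proboscis.asserts import assert_equal

def assert_http_get_response(url, expected=200):
    # Same pattern as the Elasticsearch/Kibana checks above: GET the URL
    # and fail with a descriptive message on an unexpected status code.
    r = requests.get(url)
    assert_equal(r.status_code, expected,
                 "{0} responded with {1}, expected {2}".format(
                     url, r.status_code, expected))
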
Ejemplo n.º 32
0
    def vip_reservation_for_plugin(self):
        """Check vip reservation for fuel plugin

        Scenario:
        1. Revert snapshot with 3 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with predefined network_roles.yaml
        4. Build and copy plugin from container nailgun
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check vip reservation

        Duration 40m
        """
        plugin_name = 'vip_reservation_plugin'
        plugin_path = '/var'
        dir_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'tasks.yaml'
        net_role_file = 'network_roles.yaml'
        metadata_file = 'metadata.yaml'
        namespace = 'haproxy'

        self.env.revert_snapshot("ready_with_3_slaves")
        with self.env.d_env.get_admin_remote() as admin_remote:
            # initiate fuel plugin builder instance
            fpb = FuelPluginBuilder(admin_remote)
            # install fuel_plugin_builder on master node
            fpb.fpb_install()
            # create plugin template on the master node
            fpb.fpb_create_plugin(plugin_name)
            # replace plugin tasks, metadata, network_roles
            fpb.fpb_replace_plugin_content(
                os.path.join(dir_path, net_role_file),
                os.path.join('/root/', plugin_name, net_role_file))
            fpb.fpb_replace_plugin_content(
                os.path.join(dir_path, tasks_file),
                os.path.join('/root/', plugin_name, tasks_file))
            fpb.fpb_replace_plugin_content(
                os.path.join(dir_path, metadata_file),
                os.path.join('/root/', plugin_name, metadata_file))
            # build plugin
            fpb.fpb_build_plugin(os.path.join('/root/', plugin_name))
            # copy plugin archive file from nailgun container
            # to the /var directory on the master node
            fpb.fpb_copy_plugin_from_container(plugin_name, plugin_path)
            # let's install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.join(plugin_path, '{}.rpm'.format(plugin_name)))

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE,
                                                  settings={
                                                      "net_provider":
                                                      'neutron',
                                                      "net_segment_type":
                                                      NEUTRON_SEGMENT_TYPE
                                                  })
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('cluster is %s' % str(cluster_id))

        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller'],
            'slave-02': ['compute']
        })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \
                             "Hiera.logger = 'noop'; puts JSON.dump " \
                             "(h.lookup('network_metadata', " \
                             "[], {}, nil, nil))\""
            for vip in ('reserved_pub', 'reserved_mng'):
                # get vips from hiera
                vip_hiera = json.loads(
                    remote.execute(hiera_json_out)['stdout']
                    [0])["vips"][vip]["ipaddr"]
                # get vips from database
                vip_db = self.env.postgres_actions.run_query(
                    db='nailgun',
                    query="select ip_addr from ip_addrs where "
                    "vip_type = '\"'\"'{0}'\"'\"';".format(vip))
                vip_array = [vip_hiera, vip_db]
                for ip in vip_array[1:]:
                    asserts.assert_equal(
                        vip_array[0], ip,
                        "Vip from hiera output {0} does not equal "
                        "to {1}".format(vip_array[0], ip))
                vip_pcs = remote.execute('pcs resource show {0}{1}'.format(
                    'vip__', vip))['exit_code']
                asserts.assert_not_equal(
                    0, vip_pcs, 'The vip__{0} was found in '
                    'pacemaker'.format(vip))
                vip_ns = remote.execute(
                    'ip netns exec {0} ip a | grep {1}{2}'.format(
                        namespace, 'b_', vip))['exit_code']
                asserts.assert_not_equal(
                    0, vip_ns, 'The {0} was found in {1} '
                    'namespace'.format(vip, namespace))
Ejemplo n.º 33
0
    def deploy_cluster_with_reboot_plugin_timeout(self):
        """Check deployment is failed by reboot task plugin.

        Scenario:
            1. Revert snapshot with 3 nodes
            2. Download and install fuel-plugin-builder
            3. Create plugin with reboot task,
               set timeout for reboot task as 1 second
            4. Build and copy plugin from container nailgun
            5. Install plugin to fuel
            6. Create cluster and enable plugin
            7. Provision nodes
            8. Deploy cluster
            9. Check deployment was failed by reboot task
            10. Check error msg at the logs

        Duration 15m
        """
        # define some plugin related variables
        plugin_name = 'timeout_plugin'
        container_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        tasks_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'reboot_tasks.yaml'
        # start reverting snapshot
        self.env.revert_snapshot("ready_with_3_slaves")
        # let's get ssh client for the master node
        with self.env.d_env.get_admin_remote() as admin_remote:
            # initiate fuel plugin builder instance
            fpb = FuelPluginBuilder()
            # install fuel_plugin_builder on master node
            fpb.fpb_install()
            # change timeout to a new value '1'
            fpb.put_value_to_local_yaml(os.path.join(tasks_path, tasks_file),
                                        os.path.join('/tmp/', tasks_file),
                                        [1, 'parameters', 'timeout'],
                                        1)
            # create plugin template on the master node
            fpb.fpb_create_plugin(container_plugin_path)
            # replace plugin tasks with our file
            fpb.fpb_replace_plugin_content(
                os.path.join('/tmp/', tasks_file),
                os.path.join(container_plugin_path, 'tasks.yaml'))
            # build plugin
            packet_name = fpb.fpb_build_plugin(container_plugin_path)
            # copy plugin archive file from nailgun container
            # to the /var directory on the master node
            fpb.fpb_copy_plugin_from_container(
                container_plugin_path,
                packet_name,
                plugin_path)
            # let's install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.join(plugin_path, packet_name))
        # create cluster
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable it
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('cluster is %s' % str(cluster_id))

        self.fuel_web.update_nodes(
            cluster_id,
            {'slave-01': ['controller', 'ceph-osd']}
        )

        self.fuel_web.provisioning_cluster_wait(cluster_id)
        logger.info('Start cluster #%s deployment', cluster_id)
        task = self.fuel_web.client.deploy_nodes(cluster_id)
        self.fuel_web.assert_task_failed(task)

        msg = 'Time detection (1 sec) for node reboot has expired'
        cmd = 'grep "{0}" /var/log/docker-logs/astute/astute.log'.format(msg)
        with self.env.d_env.get_admin_remote() as admin_remote:
            result = ''.join(admin_remote.execute(cmd)['stdout'])

        asserts.assert_true(
            msg in result,
            'Failed to find reboot plugin warning message in logs'
        )
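
put_value_to_local_yaml rewrites one value inside the tasks file; its exact implementation lives in FuelPluginBuilder, but a stand-alone sketch of the same idea (an assumption, not the real helper) is straightforward with PyYAML:

import yaml

def put_value_to_yaml(src, dst, path, value):
    """Read `src`, set the element addressed by `path` (e.g.
    [1, 'parameters', 'timeout'] -> second task's parameters.timeout)
    to `value`, and write the result to `dst`."""
    with open(src) as f:
        data = yaml.safe_load(f)
    node = data
    for key in path[:-1]:
        node = node[key]
    node[path[-1]] = value
    with open(dst, 'w') as f:
        yaml.safe_dump(data, f, default_flow_style=False)
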
Ejemplo n.º 34
0
    def separate_db_service(self):
        """Deploy cluster with 3 separate database roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with database role
            4. Add 1 compute and cinder
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_db_service
        """
        self.check_run("separate_db_service")
        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        checkers.upload_tarball(
            self.env.d_env.get_admin_remote(),
            settings.SEPARATE_SERVICE_DB_PLUGIN_PATH, "/var")

        # install plugins

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        data = {
            'tenant': 'separatedb',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-database'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database'],
                'slave-05': ['standalone-database'],
                'slave-06': ['standalone-database'],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_db_service", is_make=True)
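
The check_plugin_exists / update_plugin_data pair appears in nearly every example here; a small helper sketch (not part of fuel-qa) that captures the pattern:

from proboscis.asserts import assert_true

def enable_plugin(fuel_web, cluster_id, plugin_name, options=None):
    """Assert the plugin is registered for the cluster, then enable it."""
    msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
    assert_true(fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
    fuel_web.update_plugin_data(cluster_id, plugin_name,
                                options or {'metadata/enabled': True})
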
Ejemplo n.º 35
0
    def deploy_neutron_lbaas_simple(self):
        """Deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Run OSTF

        Duration 35m
        Snapshot deploy_neutron_vlan_lbaas_simple

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(remote, LBAAS_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote, plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE_SIMPLE,
                                                  settings={
                                                      "net_provider":
                                                      'neutron',
                                                      "net_segment_type":
                                                      NEUTRON_SEGMENT_TYPE,
                                                  })

        plugin_name = 'lbaas'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        logger.debug('we have lbaas element')
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster['net_provider']), 'neutron')

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(controller['ip'])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbass_work(os_conn)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple")
Ejemplo n.º 36
0
    def deploy_nova_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 node with controller role
            5. Add 1 nodes with compute role
            6. Add 1 nodes with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_nova_example_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.d_env.get_admin_remote(), EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0} '
                         'on node {1}'.format(res_pgrep['stderr'], node))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0} on the '
                         'node {1}'.format(res_pgrep['stderr'], node))
            # curl to service
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0} '
                         'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_nova_example_ha")
Ejemplo n.º 37
0
    def separate_keystone_ceph_service(self):
        """Deploy cluster with 3 separate keystone roles and ceph

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with keystone+database role
            4. Add 1 compute and 2 ceph nodes
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_keystone_ceph_service
        """
        self.check_run("separate_keystone_ceph_service")
        self.env.revert_snapshot("ready_with_9_slaves")

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugins to the master node

            checkers.upload_tarball(remote,
                                    settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
                                    "/var")

            checkers.upload_tarball(
                remote, settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH, "/var")

            # install plugins

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(
                    settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(
                    settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '2',
            'tenant': 'separatekeystoneceph',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_names = ['detach-database', 'detach-keystone']
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        for plugin_name in plugin_names:
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            options = {'metadata/enabled': True}
            self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database', 'standalone-keystone'],
                'slave-05': ['standalone-database', 'standalone-keystone'],
                'slave-06': ['standalone-database', 'standalone-keystone'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_keystone_ceph_service")
Ejemplo n.º 38
0
    def deploy_lma_collector_ha(self):
        """Deploy cluster in HA mode with the LMA collector plugin

        This also deploys the Elasticsearch-Kibana plugin and the
        InfluxDB-Grafana plugin since they work together with the LMA collector
        plugin.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute + cinder role
            6. Add 1 node with influxdb_grafana + elasticsearch roles
            7. Deploy the cluster
            8. Check that the plugins work
            9. Run OSTF

        Duration 70m
        Snapshot deploy_lma_collector_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugins to the master node
            checkers.upload_tarball(remote, conf.LMA_COLLECTOR_PLUGIN_PATH,
                                    "/var")
            checkers.upload_tarball(remote,
                                    conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH,
                                    "/var")
            checkers.upload_tarball(remote, conf.INFLUXDB_GRAFANA_PLUGIN_PATH,
                                    "/var")

            # install plugins
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.LMA_COLLECTOR_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE,
            })

        influxdb_user = "******"
        influxdb_pass = "******"
        influxdb_rootpass = "******"
        grafana_user = "******"
        grafana_pass = "******"
        plugins = [
            {
                'name': 'lma_collector',
                'options': {
                    'metadata/enabled': True,
                    'environment_label/value': 'deploy_lma_collector_ha',
                    'elasticsearch_mode/value': 'local',
                    'influxdb_mode/value': 'local',
                }
            },
            {
                'name': 'elasticsearch_kibana',
                'options': {
                    'metadata/enabled': True,
                }
            },
            {
                'name': 'influxdb_grafana',
                'options': {
                    'metadata/enabled': True,
                    'influxdb_rootpass/value': influxdb_rootpass,
                    'influxdb_username/value': influxdb_user,
                    'influxdb_userpass/value': influxdb_pass,
                    'grafana_username/value': grafana_user,
                    'grafana_userpass/value': grafana_pass,
                }
            },
        ]
        for plugin in plugins:
            plugin_name = plugin['name']
            msg = "Plugin '%s' couldn't be found. Test aborted" % plugin_name
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('%s plugin is installed' % plugin_name)
            self.fuel_web.update_plugin_data(cluster_id, plugin_name,
                                             plugin['options'])

        analytics_roles = ["influxdb_grafana", "elasticsearch_kibana"]
        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute", "cinder"],
                "slave-05": analytics_roles,
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        analytics_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, analytics_roles)
        msg = "One node with '{}' roles must be present, found {}".format(
            ' + '.join(analytics_roles), len(analytics_nodes))

        assert_true(len(analytics_nodes) == 1, msg)

        analytics_node_ip = analytics_nodes[0].get('ip')
        assert_is_not_none(analytics_node_ip,
                           "Fail to retrieve the IP address for slave-05")

        def assert_http_get_response(url, expected=200):
            r = requests.get(url)
            assert_equal(
                r.status_code, expected,
                "{} responded with {}, expected {}".format(
                    url, r.status_code, expected))

        logger.debug("Check that Elasticsearch is ready")
        assert_http_get_response("http://{0}:9200/".format(analytics_node_ip))

        logger.debug("Check that Kibana is ready")
        assert_http_get_response("http://{0}/".format(analytics_node_ip))

        logger.debug("Check that the root user can access InfluxDB")
        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        assert_http_get_response(
            influxdb_url.format(analytics_node_ip, 'root', influxdb_rootpass))
        logger.debug("Check that the LMA user can access InfluxDB")
        assert_http_get_response(
            influxdb_url.format(analytics_node_ip, influxdb_user,
                                influxdb_pass))

        logger.debug("Check that the LMA user can access Grafana")
        assert_http_get_response("http://{0}:{1}@{2}:8000/api/org".format(
            grafana_user, grafana_pass, analytics_node_ip))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_lma_collector_ha")
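
The InfluxDB check builds the query URL by string formatting; an equivalent form (same endpoint and parameters as above, just expressed via requests' params) may be easier to read:

import requests

def influxdb_answers_query(host, user, password, query="show measurements"):
    # Same check as influxdb_url above: the query endpoint should
    # answer 200 for the given credentials.
    r = requests.get("http://{0}:8086/query".format(host),
                     params={"db": "lma", "u": user, "p": password,
                             "q": query})
    return r.status_code == 200
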
Ejemplo n.º 39
0
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as admin_remote:
            # copy plugin to the master node
            checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
            checkers.upload_tarball(
                admin_remote,
                EXAMPLE_PLUGIN_V3_PATH,
                '/var')
            # install plugin
            checkers.install_plugin_check_code(
                admin_remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE
            }
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-01') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if fuel_plugin_example_v3_puppet called
            # between netconfig and connectivity_tests
            netconfig_str = 'MODULAR: netconfig.pp'
            plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
            connect_str = 'MODULAR: connectivity_tests.pp'
            checkers.check_log_lines_order(remote,
                                           log_file_path='/var/log/puppet.log',
                                           line_matcher=[netconfig_str,
                                                         plugin_str,
                                                         connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        with self.env.fuel_web.get_ssh_for_node('slave-02') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        with self.env.fuel_web.get_ssh_for_node('slave-03') as remote:
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.0.all')
            checkers.check_file_exists(remote,
                                       '/tmp/plugin+100.all')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_sh')
            checkers.check_file_exists(remote,
                                       '/tmp/fuel_plugin_example_v3_puppet')

            # check if service run on slave-03
            logger.debug("Checking service on node {0}".format('slave-03'))

            cmd = 'pgrep -f fuel-simple-service'
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd, res_pgrep['stderr']))
            process_count = len(res_pgrep['stdout'])
            assert_equal(1, process_count,
                         "There should be 1 process 'fuel-simple-service',"
                         " but {0} found {1} processes".format(cmd,
                                                               process_count))

            # curl to service
            cmd_curl = 'curl localhost:8234'
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Command {0} failed with error {1}'
                         .format(cmd_curl, res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3")
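
checkers.check_log_lines_order verifies that the plugin task ran between netconfig and the connectivity tests; a local sketch of the ordering check it presumably performs (an assumption about the helper, not its actual code):

def lines_appear_in_order(log_text, matchers):
    """Return True if every matcher occurs in `log_text`, in the given order."""
    pos = 0
    for matcher in matchers:
        pos = log_text.find(matcher, pos)
        if pos == -1:
            return False
        pos += len(matcher)
    return True
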
Ejemplo n.º 40
0
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            for plugin in [conf.ZABBIX_PLUGIN_PATH,
                           conf.ZABBIX_SNMP_PLUGIN_PATH]:
                checkers.upload_tarball(
                    remote, plugin, "/var")
                checkers.install_plugin_check_code(
                    remote,
                    plugin=os.path.basename(plugin))

        settings = None

        if conf.NEUTRON_ENABLE:
            settings = {
                "net_provider": "neutron",
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
            settings=settings
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        cmd = "crm resource status p_zabbix-server"
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            response = remote.execute(cmd)["stdout"][0]
        assert_true("p_zabbix-server is running" in response,
                    "p_zabbix-server resource wasn't found in pacemaker:\n{0}"
                    .format(response))

        public_vip = self.fuel_web.get_public_vip(cluster_id)

        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        screens_html = bs4.BeautifulSoup(zabbix_web.get_screens())
        screens_links = screens_html.find_all('a')
        assert_true(any('charts.php?graphid=' in link.get('href')
                        for link in screens_links),
                    "Zabbix screen page does not contain graphs:\n{0}".
                    format(screens_links))

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
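        # OID .1.3.6.1.4.1.8072.2.3.0.1 is netSnmpExampleHeartbeatNotification
        # from NET-SNMP-EXAMPLES-MIB; the log check below greps for that same
        # notification name in zabbix_server.log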
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0)

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
Example No. 41
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote,
                EXAMPLE_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = NEUTRON_SEGMENT['vlan']
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Example No. 42
    def deploy_influxdb_grafana_plugin(self):
        """Deploy a cluster with the InfluxDB-Grafana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with influxdb_grafana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_influxdb_grafana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node and install it
        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, INFLUXDB_GRAFANA_PLUGIN_PATH, '/var')
            checkers.install_plugin_check_code(
                remote, plugin=os.path.basename(INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'influxdb_grafana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_influxdb_grafana',
            'influxdb_rootpass/value': 'lmapass',
            'influxdb_userpass/value': 'lmapass',
            'grafana_userpass/value': 'lmapass',
        }

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            "Plugin couldn't be enabled. Check plugin version. Test aborted")

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['influxdb_grafana']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        influxdb_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        influxdb_server_ip = influxdb_server.get('ip')
        assert_is_not_none(influxdb_server_ip,
                           "Failed to get the IP of InfluxDB server")

        logger.debug("Check that InfluxDB is ready")

        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        r = requests.get(influxdb_url.format(
            influxdb_server_ip, 'lma', options['influxdb_userpass/value']))
        msg = "InfluxDB responded with {}, expected 200".format(r.status_code)
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the Grafana server is running")

        r = requests.get(
            "http://{0}:{1}@{2}:8000/api/org".format(
                'grafana', options['grafana_userpass/value'],
                influxdb_server_ip))
        msg = "Grafana server responded with {}, expected 200".format(
            r.status_code)
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_influxdb_grafana_plugin")
Example No. 43
    def deploy_neutron_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_neutron_example_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, EXAMPLE_PLUGIN_PATH, '/var')

            # install plugin

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha")
Example No. 44
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with base-os role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.d_env.get_admin_remote(),
            ELASTICSEARCH_KIBANA_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        plugin_name = 'elasticsearch_kibana'
        options = {'metadata/enabled': True,
                   'node_name/value': 'slave-03_base-os'}
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['base-os']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        es_server_ip = es_server.get('ip')
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticseach is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
Example No. 45
    def vip_reservation_for_plugin_custom_ns(self):
        """Check vip reservation for custom ns plugin

        Scenario:
        1. Revert snapshot with 3 nodes
        2. Download and install fuel-plugin-builder
        3. Create plugin with predefined network_roles.yaml
        4. Build and copy plugin to /var
        5. Install plugin to fuel
        6. Create cluster and enable plugin
        7. Deploy cluster
        8. Check vip reservation

        Duration 40m
        """
        plugin_name = 'vip_reservation_plugin'
        source_plugin_path = os.path.join('/root/', plugin_name)
        plugin_path = '/var'
        task_path = os.path.dirname(os.path.abspath(__file__))
        tasks_file = 'tasks.yaml'
        net_role_file = 'network_roles.yaml'
        metadata_file = 'metadata.yaml'
        namespace = 'custom_ns'
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as admin_remote:
            self.show_step(2)
            # initiate fuel plugin builder instance
            fpb = FuelPluginBuilder()
            # install fuel_plugin_builder on master node
            fpb.fpb_install()
            # create plugin template on the master node
            self.show_step(3)
            fpb.fpb_create_plugin(source_plugin_path)
            # replace plugin tasks, metadata, network_roles
            fpb.fpb_replace_plugin_content(
                os.path.join(task_path, net_role_file),
                os.path.join(source_plugin_path, net_role_file))
            fpb.fpb_replace_plugin_content(
                os.path.join(task_path, tasks_file),
                os.path.join(source_plugin_path, tasks_file))
            fpb.fpb_replace_plugin_content(
                os.path.join(task_path, metadata_file),
                os.path.join(source_plugin_path, metadata_file))

            fpb.change_remote_yaml(
                os.path.join(source_plugin_path, net_role_file),
                [0, 'properties', 'vip', 0, 'namespace'], namespace)
            fpb.change_remote_yaml(
                os.path.join(source_plugin_path, net_role_file),
                [1, 'properties', 'vip', 0, 'namespace'], namespace)
            # build plugin
            self.show_step(4)
            packet_name = fpb.fpb_build_plugin(source_plugin_path)
            # copy plugin archive file
            # to the /var directory on the master node
            fpb.fpb_copy_plugin(os.path.join(source_plugin_path, packet_name),
                                plugin_path)
            self.show_step(5)
            # let's install plugin
            checkers.install_plugin_check_code(admin_remote,
                                               plugin=os.path.join(
                                                   plugin_path, packet_name))
        self.show_step(6)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )
        # get plugins from fuel and enable our one
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        logger.info('cluster is %s' % str(cluster_id))

        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller'],
            'slave-02': ['compute']
        })
        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            hiera_json_out = "ruby -rhiera -rjson -e \"h = Hiera.new(); " \
                             "Hiera.logger = 'noop'; " \
                             "puts JSON.dump(h.lookup('network_metadata', " \
                             "[], {}, nil, nil))\""
            for vip in ('reserved_pub', 'reserved_mng'):
                # get vips from hiera
                vip_hiera = json.loads(
                    remote.execute(hiera_json_out)['stdout']
                    [0])["vips"][vip]["ipaddr"]
                # get vips from database
                vip_db = self.env.postgres_actions.run_query(
                    db='nailgun',
                    query="select ip_addr from ip_addrs where "
                    "vip_type = '\"'\"'{0}'\"'\"';".format(vip))
                # get vips from pacemaker
                vip_pcs = remote.execute('pcs resource show {0}{1}'.format(
                    'vip__', vip))['stdout'][1].split(' ')[6].split('=')[1]
                # get vips from namespace
                vip_ns = remote.execute(
                    'ip netns exec {0} ip -4 a show {1}{2}'.format(
                        namespace, 'b_',
                        vip))['stdout'][1].split(' ')[5].split('/')[0]
                vip_array = [vip_hiera, vip_db, vip_pcs, vip_ns]
                for ip in vip_array[1:]:
                    asserts.assert_equal(
                        vip_array[0], ip,
                        "Vip from hiera output {0} does not equal "
                        "to {1}".format(vip_array[0], ip))
Example No. 46
    def separate_rabbit_ceph_service(self):
        """Deploy cluster with 3 separate rabbit roles and ceph

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with rabbit role
            4. Add 1 compute and 2 ceph nodes
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_rabbit_ceph_service
        """
        self.check_run("separate_rabbit_ceph_service")
        self.env.revert_snapshot("ready_with_9_slaves")

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugins to the master node

            checkers.upload_tarball(
                remote, settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH, "/var")

            # install plugins

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(
                    settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH))

        data = {
            "volumes_lvm": False,
            "volumes_ceph": True,
            "images_ceph": True,
            "ephemeral_ceph": True,
            "objects_ceph": True,
            "osd_pool_size": "2",
            "tenant": "separaterabbitceph",
            "user": "******",
            "password": "******",
            "net_provider": "neutron",
            "net_segment_type": settings.NEUTRON_SEGMENT["tun"],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = "detach-rabbitmq"
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {"metadata/enabled": True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["standalone-rabbitmq"],
                "slave-05": ["standalone-rabbitmq"],
                "slave-06": ["standalone-rabbitmq"],
                "slave-07": ["compute"],
                "slave-08": ["ceph-osd"],
                "slave-09": ["ceph-osd"],
            },
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_rabbit_ceph_service")
Example No. 47
    def deploy_neutron_lbaas_simple_reset_ready(self):
        """Deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Reset cluster
            11. Add 1 compute
            12. Re-deploy cluster
            13. Check health of lbaas agent on the node
            14. Create pool and vip
            15. Run OSTF

        Snapshot deploy_neutron_lbaas_simple_reset_ready

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'lbaas' in attr['editable']:
            logger.debug('we have lbaas element')
            plugin_data = attr['editable']['lbaas']['metadata']
            plugin_data['enabled'] = True

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster['net_provider']), 'neutron')

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(controller['ip'])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbass_work(os_conn)

        self.fuel_web.stop_reset_env_wait(cluster_id)

        self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:2])

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-03': ['compute'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbass_work(os_conn)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_lbaas_simple_reset_ready")
Example No. 48
    def deploy_neutron_lbaas_simple(self):
        """Deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Run OSTF

        Snapshot deploy_neutron_vlan_lbaas_simple

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.get_admin_remote(),
            plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
        if 'lbaas' in attr['editable']:
            logger.debug('we have lbaas element')
            plugin_data = attr['editable']['lbaas']['metadata']
            plugin_data['enabled'] = True

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster['net_provider']), 'neutron')

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(controller['ip'])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbass_work(os_conn)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple")
Example No. 49
    def separate_db_service(self):
        """Deploy cluster with 3 separate database roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with database role
            4. Add 1 compute node and 1 cinder node
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_db_service
        """
        self.check_run("separate_db_service")
        self.env.revert_snapshot("ready_with_9_slaves")

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugins to the master node

            checkers.upload_tarball(remote,
                                    settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
                                    "/var")

            # install plugins

            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(
                    settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        data = {
            'tenant': 'separatedb',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-database'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database'],
                'slave-05': ['standalone-database'],
                'slave-06': ['standalone-database'],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_db_service", is_make=True)
Example No. 50
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with base-os role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                ELASTICSEARCH_KIBANA_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        plugin_name = 'elasticsearch_kibana'
        options = {'metadata/enabled': True,
                   'node_name/value': 'slave-03_base-os'}
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['base-os']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        es_server_ip = es_server.get('ip')
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticseach is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
Example No. 51
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node

        checkers.upload_tarball(
            self.env.d_env.get_admin_remote(),
            EXAMPLE_PLUGIN_PATH, '/var')

        # install plugin

        checkers.install_plugin_check_code(
            self.env.d_env.get_admin_remote(),
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = 'vlan'
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
        res_pgrep = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd)
        assert_equal(0, res_pgrep['exit_code'],
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        assert_equal(1, len(res_pgrep['stdout']),
                     'Failed with error {0}'.format(res_pgrep['stderr']))
        # curl to service
        _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
        res_curl = self.env.d_env.get_ssh_to_remote(_ip).execute(cmd_curl)
        assert_equal(0, res_curl['exit_code'],
                     'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Example No. 52
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(remote, CONF.EMC_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(remote,
                                               plugin=os.path.basename(
                                                   CONF.EMC_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=CONF.DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in [
                "emc_sp_a_ip", "emc_sp_b_ip", "emc_username", "emc_password",
                "emc_pool_name"
        ]:
            asserts.assert_true(
                option in attr["editable"]["emc_vnx"],
                "{0} is not in cluster attributes: {1}".format(
                    option, str(attr["editable"]["emc_vnx"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = CONF.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = CONF.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = CONF.EMC_USERNAME
        emc_options["emc_password"]["value"] = CONF.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = CONF.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-01', 'slave-02', 'slave-03']
        ]
        compute_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-04', 'slave-05']
        ]

        controller_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in controller_nodes
        ]
        compute_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in compute_nodes
        ]

        # check cinder-volume settings

        for remote in controller_remotes:
            self.check_emc_cinder_config(remote=remote,
                                         path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(remote=remote)

        # check cinder-volume layout on controllers

        cinder_volume_ctrls = [
            self.check_service(controller, "cinder-volume")
            for controller in controller_remotes
        ]
        asserts.assert_equal(
            sum(cinder_volume_ctrls), 1, "Cluster has more than one "
            "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [
            self.check_service(compute, "cinder-volume")
            for compute in compute_remotes
        ]
        # closing connections
        for remote in controller_remotes + compute_remotes:
            remote.clear()

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
Example No. 53
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(
                remote,
                CONF.EMC_PLUGIN_PATH, '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(CONF.EMC_PLUGIN_PATH))

        settings = None

        if CONF.NEUTRON_ENABLE:
            settings = {
                "net_provider": 'neutron',
                "net_segment_type": CONF.NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=CONF.DEPLOYMENT_MODE,
            settings=settings
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in ["emc_sp_a_ip", "emc_sp_b_ip",
                       "emc_username", "emc_password", "emc_pool_name"]:
            asserts.assert_true(option in attr["editable"]["emc_vnx"],
                                "{0} is not in cluster attributes: {1}".
                                format(option,
                                       str(attr["editable"]["emc_vnx"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = CONF.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = CONF.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = CONF.EMC_USERNAME
        emc_options["emc_password"]["value"] = CONF.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = CONF.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                            for node in ['slave-01', 'slave-02', 'slave-03']]
        compute_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                         for node in ['slave-04', 'slave-05']]

        controller_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                              for node in controller_nodes]
        compute_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                           for node in compute_nodes]

        # check cinder-volume settings

        for remote in controller_remotes:
            self.check_emc_cinder_config(
                remote=remote, path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(remote=remote)

        # check cinder-volume layout on controllers

        cinder_volume_ctrls = [self.check_service(controller, "cinder-volume")
                               for controller in controller_remotes]
        asserts.assert_equal(sum(cinder_volume_ctrls), 1,
                             "Cluster has more than one "
                             "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [self.check_service(compute, "cinder-volume")
                               for compute in compute_remotes]
        # closing connections
        for remote in controller_remotes + compute_remotes:
            remote.clear()

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
Example No. 54
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, conf.ZABBIX_PLUGIN_PATH, "/var")
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(conf.ZABBIX_PLUGIN_PATH))

        settings = None
        if conf.NEUTRON_ENABLE:
            settings = {
                "net_provider": "neutron",
                "net_segment_type": conf.NEUTRON_SEGMENT_TYPE
            }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=conf.DEPLOYMENT_MODE,
            settings=settings
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        cmd = "crm resource status p_zabbix-server"
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            response = remote.execute(cmd)["stdout"][0]
        assert_true("p_zabbix-server is running" in response,
                    "p_zabbix-server resource wasn't found in pacemaker:\n{0}"
                    .format(response))

        public_vip = self.fuel_web.get_public_vip(cluster_id)

        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        screens_html = bs4.BeautifulSoup(zabbix_web.get_screens())
        screens_links = screens_html.find_all('a')
        assert_true(any('charts.php?graphid=' in link.get('href')
                        for link in screens_links),
                    "Zabbix screen page does not contain graphs:\n{0}".
                    format(screens_links))

        self.env.make_snapshot("deploy_zabbix_ha")