def _prepare_contrail_plugin(self, slaves=None, pub_net=False):
        """Copy necessary packages to the master node and install them"""

        self.env.revert_snapshot("ready_with_{:d}_slaves".format(slaves))

        with self.env.d_env.get_admin_remote() as remote:

            # copy plugin to the master node
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=CONTRAIL_PLUGIN_PATH,
                tar_target='/var'
            )

            # install plugin
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(CONTRAIL_PLUGIN_PATH))

            # copy additional packages to the master node
            self._upload_contrail_packages(remote)

            # install packages
            self._install_packages(remote)

        # prepare fuel
        self._assign_net_provider(pub_net)
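    # Typical invocation from a test case, as a sketch (the slave count must
    # match an existing "ready_with_<n>_slaves" snapshot; pub_net toggles the
    # public network assignment done by _assign_net_provider):
    #
    #     self._prepare_contrail_plugin(slaves=5, pub_net=True)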
def install_dvs_plugin(master_node):
    """Download and instal DVS plugin on master node."""
    # copy plugins to the master node
    utils.upload_tarball(master_node, DVS_PLUGIN_PATH, "/var")

    # install plugin
    utils.install_plugin_check_code(master_node,
                                    os.path.basename(DVS_PLUGIN_PATH))
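# Usage sketch (assumption: master_node here is the admin node IP, i.e. the
# same value passed as ip=... in the keyword-style examples in this
# collection):
#
#     install_dvs_plugin(self.ssh_manager.admin_ip)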
    def upload_plugin(self):
        """Upload plugin to master node"""
        # copy plugin to the master node
        assert_true(self.plugin_path, "plugin_path is not specified")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=self.plugin_path,
                             tar_target='/var')
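    # A matching install step for the upload_plugin method above, sketched
    # under the same assumptions (plugin_path and ssh_manager are provided by
    # the surrounding test framework); the method name is hypothetical.
    def install_uploaded_plugin(self):
        """Install the previously uploaded plugin on the master node."""
        assert_true(self.plugin_path, "plugin_path is not specified")

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(self.plugin_path))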
def install_manila_plugin(master_node_ip):
    """Install plugin packages to the master node."""

    utils.upload_tarball(
        master_node_ip,
        MANILA_PLUGIN_PATH, "/var")
    utils.install_plugin_check_code(
        master_node_ip,
        os.path.basename(MANILA_PLUGIN_PATH))
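# Nearly every snippet in this collection repeats the same upload-then-install
# pair.  Assuming the fuelweb_test helpers are importable (as they are in the
# surrounding examples), the pattern can be folded into one generic helper;
# the function name below is hypothetical.
import os

from fuelweb_test.helpers import utils


def upload_and_install_plugin(admin_ip, plugin_path, tar_target='/var'):
    """Upload a plugin tarball to the master node and install it."""
    # copy the plugin tarball to the master node
    utils.upload_tarball(ip=admin_ip,
                         tar_path=plugin_path,
                         tar_target=tar_target)

    # install the plugin and verify the installer's exit code
    utils.install_plugin_check_code(ip=admin_ip,
                                    plugin=os.path.basename(plugin_path))


# With this helper, install_manila_plugin(master_node_ip) above reduces to:
#     upload_and_install_plugin(master_node_ip, MANILA_PLUGIN_PATH)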
    def install_plugin(self):
        """Install the Mistral plugin on the master node."""
        master_remote = self.get_remote('master')
        utils.upload_tarball(master_remote.host,
                             os.environ['MISTRAL_PLUGIN_PATH'],
                             '/var')
        utils.install_plugin_check_code(
            master_remote.host,
            os.path.basename(os.environ['MISTRAL_PLUGIN_PATH']))
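    # Hypothetical guard sketched from the check_plugin_path_env() calls used
    # in the separate-services examples below; applying it to the Mistral
    # snippet is an assumption, and the method name is illustrative.
    def check_mistral_plugin_path(self):
        """Fail early if MISTRAL_PLUGIN_PATH is not exported."""
        check_plugin_path_env(
            var_name='MISTRAL_PLUGIN_PATH',
            plugin_path=os.environ.get('MISTRAL_PLUGIN_PATH'))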
    def deploy_zabbix_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard

        Duration 70m
        Snapshot deploy_zabbix_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.ZABBIX_PLUGIN_PATH,
            tar_target="/var")
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.ZABBIX_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ha")
    def install_nsxt_plugin(self):
        """Download and install NSX-T plugin on master node.

        :return: None
        """
        master_ip = self.ssh_manager.admin_ip
        utils.upload_tarball(ip=master_ip,
                             tar_path=self.default.NSXT_PLUGIN_PATH,
                             tar_target='/var')

        utils.install_plugin_check_code(
            ip=master_ip,
            plugin=os.path.basename(self.default.NSXT_PLUGIN_PATH))
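    # Hypothetical follow-up step showing how the installed plugin would be
    # enabled in a cluster.  The call pattern mirrors the other snippets in
    # this collection; the plugin name is an assumption, not taken from the
    # NSX-T example itself.
    def enable_nsxt_plugin(self, cluster_id):
        """Enable the previously installed NSX-T plugin for a cluster."""
        plugin_name = 'nsx-t'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        self.fuel_web.update_plugin_data(cluster_id, plugin_name,
                                         {'metadata/enabled': True})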
    def _bootstrap(self):

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH,
            tar_target="/var")

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        plugin_options = {
            'send_to/value': 'root@localhost',
            'send_from/value': 'nagios@localhost',
            'smtp_host/value': '127.0.0.1',
            'nagios_password/value': self._nagios_password,
        }

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)
        logger.debug(
            '{name:s} ({ver!s}) plugin is installed'
            ''.format(name=self._name, ver=self._version))
        self.fuel_web.update_plugin_settings(cluster_id,
                                             self._name,
                                             self._version,
                                             plugin_options)

        return cluster_id
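    # Hypothetical minimal test built on top of _bootstrap above; the method
    # name, node layout and the 'infrastructure_alerting' role assignment are
    # illustrative, not taken from the original snippet.
    def deploy_lma_infra_alerting_minimal(self):
        """Deploy a small cluster using the _bootstrap helper."""
        cluster_id = self._bootstrap()

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['infrastructure_alerting'],
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)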
    def deploy_lma_toolchain(self):
        """Deploy cluster in HA mode with the LMA toolchain

        This also deploys the Elasticsearch-Kibana plugin and the
        InfluxDB-Grafana plugin since they work together with the LMA collector
        plugin.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute + cinder role
            6. Add 1 node with influxdb_grafana + elasticsearch_kibana +
               infrastructure_alerting roles
            7. Deploy the cluster
            8. Check that the plugins work
            9. Run OSTF

        Duration 150m
        Snapshot deploy_lma_toolchain

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # TODO(scroiset): use actions fuel_actions.py
        # upload_plugin and install_plugin
        # copy plugins to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.LMA_COLLECTOR_PLUGIN_PATH,
                             tar_target="/var")
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH,
            tar_target="/var")
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.INFLUXDB_GRAFANA_PLUGIN_PATH,
                             tar_target="/var")
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH,
                             tar_target="/var")

        # install plugins
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_COLLECTOR_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.INFLUXDB_GRAFANA_PLUGIN_PATH))
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.LMA_INFRA_ALERTING_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        influxdb_user = "******"
        influxdb_pass = "******"
        influxdb_rootpass = "******"
        grafana_user = "******"
        grafana_pass = "******"
        mysql_dbname = "grafanalma"
        mysql_user = "******"
        mysql_pass = "******"
        nagios_pass = "******"
        plugins = [
            {
                'name': 'lma_collector',
                'version': '0.9.0',
                'options': {
                    'environment_label/value': 'deploy_lma_toolchain',
                    'elasticsearch_mode/value': 'local',
                    'influxdb_mode/value': 'local',
                    'alerting_mode/value': 'local',
                }
            },
            {
                'name': 'elasticsearch_kibana',
                'version': '0.9.0',
                'options': {}
            },
            {
                'name': 'lma_infrastructure_alerting',
                'version': '0.9.0',
                'options': {
                    'send_to/value': 'root@localhost',
                    'send_from/value': 'nagios@localhost',
                    'smtp_host/value': '127.0.0.1',
                    'nagios_password/value': nagios_pass,
                }
            },
            {
                'name': 'influxdb_grafana',
                'version': '0.9.0',
                'options': {
                    'influxdb_rootpass/value': influxdb_rootpass,
                    'influxdb_username/value': influxdb_user,
                    'influxdb_userpass/value': influxdb_pass,
                    'grafana_username/value': grafana_user,
                    'grafana_userpass/value': grafana_pass,
                    'mysql_mode/value': 'local',
                    'mysql_dbname/value': mysql_dbname,
                    'mysql_username/value': mysql_user,
                    'mysql_password/value': mysql_pass,
                }
            },
        ]
        for plugin in plugins:
            plugin_name = plugin['name']
            plugin_version = plugin['version']
            msg = "Plugin '{:s}' couldn't be found. " \
                  "Test aborted".format(plugin_name)
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            logger.debug('{:s} plugin is installed'.format(plugin_name))
            self.fuel_web.update_plugin_settings(cluster_id, plugin_name,
                                                 plugin_version,
                                                 plugin['options'])

        analytics_roles = [
            "influxdb_grafana", "elasticsearch_kibana",
            "infrastructure_alerting"
        ]
        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute", "cinder"],
                "slave-05": analytics_roles,
            })
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=9000)

        analytics_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, analytics_roles)
        msg = "One node with '{}' roles must be present, found {}".format(
            ' + '.join(analytics_roles), len(analytics_nodes))

        assert_true(len(analytics_nodes) == 1, msg)

        elasticsearch_kibana_vip = self.get_vip(cluster_id, 'es_vip_mgmt')
        influxdb_grafana_vip = self.get_vip(cluster_id, 'influxdb')
        nagios_vip = self.get_vip(cluster_id, 'infrastructure_alerting')
        assert_is_not_none(
            elasticsearch_kibana_vip,
            "Failed to retrieve the Elasticsearch/Kibana cluster VIP address")
        assert_is_not_none(
            influxdb_grafana_vip,
            "Failed to retrieve the InfluxDB/Grafana cluster VIP address")
        assert_is_not_none(
            nagios_vip,
            "Failed to retrieve the Infrastructure Alerting cluster "
            "VIP address")

        def assert_http_get_response(url, expected=200):
            r = requests.get(url)
            assert_equal(
                r.status_code, expected,
                "{} responded with {}, expected {}".format(
                    url, r.status_code, expected))

        logger.debug("Check that Elasticsearch is ready")
        assert_http_get_response(
            "http://{0}:9200/".format(elasticsearch_kibana_vip))

        logger.debug("Check that Kibana is ready")
        assert_http_get_response(
            "http://{0}/".format(elasticsearch_kibana_vip))

        logger.debug("Check that the root user can access InfluxDB")
        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        assert_http_get_response(
            influxdb_url.format(influxdb_grafana_vip, 'root',
                                influxdb_rootpass))
        logger.debug("Check that the LMA user can access InfluxDB")
        assert_http_get_response(
            influxdb_url.format(influxdb_grafana_vip, influxdb_user,
                                influxdb_pass))

        logger.debug("Check that the LMA user can access Grafana")
        assert_http_get_response("http://{0}:{1}@{2}:8000/api/org".format(
            grafana_user, grafana_pass, influxdb_grafana_vip))
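        # Equivalent sketch using requests' auth tuple instead of embedding
        # the credentials in the URL (both forms target the same endpoint):
        #
        #     r = requests.get(
        #         "http://{0}:8000/api/org".format(influxdb_grafana_vip),
        #         auth=(grafana_user, grafana_pass))
        #     assert_equal(r.status_code, 200)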

        nagios_url = "http://{}:{}".format(nagios_vip, '8001')
        r = requests.get(nagios_url, auth=('nagiosadmin', nagios_pass))
        assert_equal(
            r.status_code, 200,
            "Nagios HTTP response code {}, expected {}".format(
                r.status_code, 200))
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_lma_toolchain")
    def separate_horizon_service(self):
        """Deploy cluster with 3 separate horizon roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with horizon role
            4. Add 1 compute and cinder
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_horizon_service
        """
        self.check_run("separate_horizon_service")
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_HORIZON_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '2',
            'tenant': 'separatehorizon',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-horizon'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-horizon'],
                'slave-05': ['standalone-horizon'],
                'slave-06': ['standalone-horizon'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_horizon_service", is_make=True)
    def separate_db_ceph_service(self):
        """Deployment with separate db nodes and ceph for all

        Scenario:
            1. Install the plugin on the master node
            2. Create Ubuntu, Neutron VXLAN, ceph for all cluster
            3. Change ceph replication factor to 2
            4. Add 3 nodes with controller role
            5. Add 3 nodes with db role
            6. Add 1 compute node
            7. Add 2 ceph nodes
            8. Run network verification
            9. Deploy changes
            10. Run network verification
            11. Run OSTF tests

        Duration 120m
        Snapshot separate_db_ceph_service
        """
        self.check_run("separate_db_ceph_service")
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'osd_pool_size': '2',
            'objects_ceph': True,
            'tenant': 'separatedbceph',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-database'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database'],
                'slave-05': ['standalone-database'],
                'slave-06': ['standalone-database'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_db_ceph_service")
    def deploy_influxdb_grafana_plugin(self):
        """Deploy a cluster with the InfluxDB-Grafana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with influxdb_grafana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_influxdb_grafana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node and install it
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=INFLUXDB_GRAFANA_PLUGIN_PATH,
                             tar_target='/var')
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'influxdb_grafana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_influxdb_grafana',
            'influxdb_rootpass/value': 'lmapass',
            'influxdb_userpass/value': 'lmapass',
            'grafana_userpass/value': 'lmapass',
        }

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            "Plugin couldn't be enabled. Check plugin version. Test aborted")

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['influxdb_grafana']
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)

        influxdb_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        influxdb_server_ip = influxdb_server.get('ip')
        assert_is_not_none(influxdb_server_ip,
                           "Failed to get the IP of InfluxDB server")

        logger.debug("Check that InfluxDB is ready")

        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        r = requests.get(
            influxdb_url.format(influxdb_server_ip, 'lma',
                                options['influxdb_userpass/value']))
        msg = "InfluxDB responded with {}, expected 200".format(r.status_code)
        assert_equal(r.status_code, 200, msg)
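        # Equivalent query built with requests' params argument instead of a
        # hand-assembled query string (a sketch; it hits the same endpoint):
        #
        #     r = requests.get(
        #         "http://{0}:8086/query".format(influxdb_server_ip),
        #         params={'db': 'lma', 'u': 'lma',
        #                 'p': options['influxdb_userpass/value'],
        #                 'q': 'show measurements'})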

        logger.debug("Check that the Grafana server is running")

        r = requests.get("http://{0}:{1}@{2}:8000/api/org".format(
            'grafana', options['grafana_userpass/value'], influxdb_server_ip))
        msg = "Grafana server responded with {}, expected 200".format(
            r.status_code)
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_influxdb_grafana_plugin")
    def deploy_ha_one_controller_glusterfs_simple(self):
        """Deploy cluster with one controller and glusterfs plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller and cinder roles
            5. Add 1 nodes with compute role
            6. Add 1 nodes with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_glusterfs
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=GLUSTER_PLUGIN_PATH,
            tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(GLUSTER_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'external_glusterfs'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True,
                   'endpoint/value': GLUSTER_CLUSTER_ENDPOINT}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-03'):
            _ip = self.fuel_web.get_nailgun_node_by_name(node)['ip']
            with self.env.d_env.get_ssh_to_remote(_ip) as remote:
                self.check_glusterfs_conf(
                    remote=remote,
                    path='/etc/cinder/glusterfs',
                    gfs_endpoint=GLUSTER_CLUSTER_ENDPOINT)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_glusterfs")
    def separate_keystone_ceph_service(self):
        """Deployment with separate keystone nodes and ceph for all

        Scenario:
            1. Install database and keystone plugins on the master node
            2. Create Ubuntu, Neutron VXLAN, ceph for all storages cluster
            3. Change ceph replication factor to 2
            4. Add 3 nodes with controller role
            5. Add 3 nodes with database+keystone role
            6. Add 1 compute node
            7. Add 2 ceph nodes
            8. Run network verification
            9. Deploy changes
            10. Run network verification
            11. Run OSTF tests

        Duration 120m
        Snapshot separate_keystone_ceph_service
        """
        self.check_run("separate_keystone_ceph_service")

        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH)
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH)
        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
                             tar_target="/var")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '2',
            'tenant': 'separatekeystoneceph',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_names = ['detach-database', 'detach-keystone']
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        for plugin_name in plugin_names:
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            options = {'metadata/enabled': True}
            self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database', 'standalone-keystone'],
                'slave-05': ['standalone-database', 'standalone-keystone'],
                'slave-06': ['standalone-database', 'standalone-keystone'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_keystone_ceph_service")
    def deploy_zabbix_ceph_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller,ceph-osd roles
            5. Add 2 node with compute,ceph-osd roles
            6. Deploy the cluster
            7. Run network verification
            8. Run OSTF
            9. Check zabbix service in pacemaker
            10. Check login to zabbix dashboard

        Duration 180m
        Snapshot deploy_zabbix_ceph_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.ZABBIX_PLUGIN_PATH,
                             tar_target="/var")
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.ZABBIX_PLUGIN_PATH))

        cluster_settings = {
            "net_provider": "neutron",
            "net_segment_type": settings.NEUTRON_SEGMENT_TYPE,
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            'tenant': 'cephHA',
            'user': '******',
            'password': '******',
            'osd_pool_size': "3"
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=cluster_settings)

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
            })

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ceph_ha")
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=ELASTICSEARCH_KIBANA_PLUGIN_PATH,
            tar_target='/var'
        )

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, self._name),
            msg)

        self.fuel_web.update_plugin_settings(cluster_id, self._name,
                                             self._version, {})

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': [self._role_name]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server_ip = self.get_vip(cluster_id)
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticsearch is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
    def deploy_zabbix_ceph_radosgw_ha(self):
        """Deploy cluster in ha mode with zabbix plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller,ceph-osd roles
            5. Add 2 node with compute,ceph-osd roles
            6. Deploy the cluster
            7. Run network verification
            8. Run OSTF
            9. Check zabbix service in pacemaker
            10. Check login to zabbix dashboard

        Duration 180m
        Snapshot deploy_zabbix_ceph_radosgw_ha

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.ZABBIX_PLUGIN_PATH,
            tar_target="/var")
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.ZABBIX_PLUGIN_PATH))

        cluster_settings = {
            "net_provider": "neutron",
            "net_segment_type": settings.NEUTRON_SEGMENT_TYPE,
            'objects_ceph': True,
            'volumes_ceph': True,
            'images_ceph': True,
            'volumes_lvm': False,
            'tenant': 'rados',
            'user': '******',
            'password': '******',
            'osd_pool_size': "3"
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=cluster_settings
        )

        zabbix_username = '******'
        zabbix_password = '******'
        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=190 * 60)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        self.env.make_snapshot("deploy_zabbix_ceph_radosgw_ha")
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH
        )
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_EXTREME_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH
        )
        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [settings.ZABBIX_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH]:
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=plugin,
                tar_target="/var")
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
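        # the trap should surface as a "Power Supply Failed" event on the
        # Zabbix dashboard served behind the public VIP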
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(
            zabbix_web, 'extreme', 'Power Supply Failed'),
            timeout_msg='Power Supply Failed event not found in Zabbix')

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [settings.ZABBIX_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_PLUGIN_PATH]:
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=plugin,
                tar_target="/var")
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
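                # snmptrapd (trap receiver) and snmptt (trap translator) are
                # expected to run on every controller; pgrep prints nothing
                # when a daemon is down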
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
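        # send a test SNMPv2c trap: .1.3.6.1.4.1.8072.2.3.0.1 is the
        # netSnmpExampleHeartbeatNotification from the Net-SNMP example MIB,
        # which is looked up in the Zabbix server log afterwards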
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0,
                 timeout_msg='SNMP heartbeat status not found '
                             'in /var/log/zabbix/zabbix_server.log')

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
Exemple #26
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=ELASTICSEARCH_KIBANA_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                    msg)

        self.fuel_web.update_plugin_settings(cluster_id, self._name,
                                             self._version, {})

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': [self._role_name]
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server_ip = self.get_vip(cluster_id)
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticsearch is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
Exemple #27
    def deploy_murano_with_glare_ha_one_controller(self):
        """Deploy cluster in ha mode with murano plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Add 1 node with murano role
            8. Deploy the cluster
            9. Run network verification
            10. Run sanity OSTF
            11. Run Murano Platform OSTF

        Duration 150m
        Snapshot deploy_murano_with_glare_ha_one_controller
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.MURANO_PLUGIN_PATH,
                             tar_target="/var")
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.MURANO_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            configure_ssl=False)

        self.setup_murano_plugin(cluster_id, glare=True)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["compute"],
                "slave-03": ["cinder"],
                "slave-04": ["murano-node"]
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity'])

        logger.debug('Run OSTF platform tests')

        test_class_main = ('fuel_health.tests.tests_platform'
                           '.test_murano_linux.MuranoDeployLinuxServicesTests')
        tests_names = ['test_deploy_dummy_app_with_glare']

        test_classes = []

        for test_name in tests_names:
            test_classes.append('{0}.{1}'.format(test_class_main, test_name))

        for test_name in test_classes:
            self.fuel_web.run_single_ostf_test(cluster_id=cluster_id,
                                               test_sets=['tests_platform'],
                                               test_name=test_name,
                                               timeout=60 * 20)

        self.env.make_snapshot("deploy_murano_with_glare_ha_one_controller")
Exemple #28
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.EMC_PLUGIN_PATH,
            tar_target='/var'
        )

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.EMC_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in ["emc_sp_a_ip", "emc_sp_b_ip",
                       "emc_username", "emc_password", "emc_pool_name"]:
            asserts.assert_true(option in attr["editable"]["emc_vnx"],
                                "{0} is not in cluster attributes: {1}".
                                format(option,
                                       str(attr["editable"]["storage"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = settings.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = settings.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = settings.EMC_USERNAME
        emc_options["emc_password"]["value"] = settings.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = settings.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                            for node in ['slave-01', 'slave-02', 'slave-03']]
        compute_nodes = [self.fuel_web.get_nailgun_node_by_name(node)
                         for node in ['slave-04', 'slave-05']]

        controller_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                              for node in controller_nodes]
        compute_remotes = [self.env.d_env.get_ssh_to_remote(node['ip'])
                           for node in compute_nodes]

        # check cinder-volume settings

        for node in controller_nodes:
            self.check_emc_cinder_config(
                ip=node['ip'], path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(ip=node['ip'])

        # check cinder-volume layout on controllers
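        # (exactly one active cinder-volume instance is expected cluster-wide:
        # it must run on a controller and on none of the computes)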

        cinder_volume_ctrls = [self.check_service(controller, "cinder-volume")
                               for controller in controller_remotes]
        asserts.assert_equal(sum(cinder_volume_ctrls), 1,
                             "Cluster has more than one "
                             "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [self.check_service(compute, "cinder-volume")
                               for compute in compute_remotes]
        # closing connections
        for remote in controller_remotes:
            remote.clear()
        for remote in compute_remotes:
            remote.clear()

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = NEUTRON_SEGMENT['vlan']
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE,
                                                  settings={
                                                      "net_provider":
                                                      'neutron',
                                                      "net_segment_type":
                                                      segment_type,
                                                      "propagate_task_deploy":
                                                      True
                                                  })

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
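        # the example plugin starts 'fuel-simple-service', which listens on
        # localhost:8234, so both the pgrep and the curl below should succeed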
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        check_plugin_path_env(var_name='ZABBIX_SNMP_PLUGIN_PATH',
                              plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [
                settings.ZABBIX_PLUGIN_PATH, settings.ZABBIX_SNMP_PLUGIN_PATH
        ]:
            utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                                 tar_path=plugin,
                                 tar_target="/var")
            utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                            plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "OK",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0)

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.EMC_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.EMC_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in [
                "emc_sp_a_ip", "emc_sp_b_ip", "emc_username", "emc_password",
                "emc_pool_name"
        ]:
            asserts.assert_true(
                option in attr["editable"]["emc_vnx"],
                "{0} is not in cluster attributes: {1}".format(
                    option, str(attr["editable"]["emc_vnx"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = settings.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = settings.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = settings.EMC_USERNAME
        emc_options["emc_password"]["value"] = settings.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = settings.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-01', 'slave-02', 'slave-03']
        ]
        compute_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-04', 'slave-05']
        ]

        controller_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in controller_nodes
        ]
        compute_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in compute_nodes
        ]

        # check cinder-volume settings

        for node in controller_nodes:
            self.check_emc_cinder_config(ip=node['ip'],
                                         path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(ip=node['ip'])

        # check cinder-volume layout on controllers

        cinder_volume_ctrls = [
            self.check_service(controller, "cinder-volume")
            for controller in controller_remotes
        ]
        asserts.assert_equal(
            sum(cinder_volume_ctrls), 1, "Cluster has more than one "
            "cinder-volume on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [
            self.check_service(compute, "cinder-volume")
            for compute in compute_remotes
        ]
        # closing connections
        for remote in controller_remotes:
            remote.clear()
        for remote in compute_remotes:
            remote.clear()

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
    def deploy_influxdb_grafana_plugin(self):
        """Deploy a cluster with the InfluxDB-Grafana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with influxdb_grafana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_influxdb_grafana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node and install it
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=INFLUXDB_GRAFANA_PLUGIN_PATH,
            tar_target='/var')
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'influxdb_grafana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_influxdb_grafana',
            'influxdb_rootpass/value': 'lmapass',
            'influxdb_userpass/value': 'lmapass',
            'grafana_userpass/value': 'lmapass',
        }

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            "Plugin couldn't be enabled. Check plugin version. Test aborted")

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['influxdb_grafana']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        influxdb_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        influxdb_server_ip = influxdb_server.get('ip')
        assert_is_not_none(influxdb_server_ip,
                           "Failed to get the IP of InfluxDB server")

        logger.debug("Check that InfluxDB is ready")

        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        r = requests.get(influxdb_url.format(
            influxdb_server_ip, 'lma', options['influxdb_userpass/value']))
        msg = "InfluxDB responded with {}, expected 200".format(r.status_code)
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the Grafana server is running")

        r = requests.get(
            "http://{0}:{1}@{2}:8000/api/org".format(
                'grafana', options['grafana_userpass/value'],
                influxdb_server_ip))
        msg = "Grafana server responded with {}, expected 200".format(
            r.status_code)
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_influxdb_grafana_plugin")
    def separate_haproxy(self):
        """Deploy HA environment with separate Haproxy.

        Scenario:
            1. Revert snapshot with ready master node
            2. Copy and install external-lb and detach-haproxy plugins
            3. Bootstrap 3 slaves from default nodegroup
            4. Create cluster with Neutron VXLAN and custom nodegroups
            5. Run 'fuel-mirror' to replace cluster repositories
               with local mirrors
            6. Bootstrap 2 slave nodes from second nodegroup
               and one node from third node group
            7. Enable plugins for cluster
            8. Add 2 controllers from default nodegroup and 1 controller
               from second node group
            9. Add 1 compute+cinder from default node group
               and 1 compute+cinder from second node group
            10. Add haproxy node from third node group
            11. Verify networks
            12. Deploy cluster

        Duration 120m
        Snapshot separate_haproxy
        """

        if not MULTIPLE_NETWORKS:
            raise exceptions.FuelQAVariableNotSet('MULTIPLE_NETWORKS', 'true')

        self.show_step(1)
        self.env.revert_snapshot('ready')

        self.show_step(2)
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH,
                             tar_target="/var")

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=SEPARATE_SERVICE_BALANCER_PLUGIN_PATH,
                             tar_target="/var")

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH))

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(SEPARATE_SERVICE_BALANCER_PLUGIN_PATH))

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
        self.show_step(4)
        admin_ip = self.ssh_manager.admin_ip
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  settings={
                                                      'net_provider':
                                                      NEUTRON,
                                                      'net_segment_type':
                                                      NEUTRON_SEGMENT['tun'],
                                                      'tenant':
                                                      'separatehaproxy',
                                                      'user':
                                                      '******',
                                                      'password':
                                                      '******',
                                                      'ntp_list': [admin_ip],
                                                  })
        self.show_step(5)
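        # point fuel-mirror at the custom Ubuntu mirror (when MIRROR_UBUNTU is
        # set), build local 'mos'/'ubuntu' mirrors and switch the cluster
        # repositories to them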
        if MIRROR_UBUNTU != '':
            ubuntu_url = MIRROR_UBUNTU.split()[1]
            replace_cmd = \
                "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \
                " /usr/share/fuel-mirror/ubuntu.yaml".format(
                    ubuntu_url)
            self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=replace_cmd)
        create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_mirror_cmd)
        apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \
                           '--env {0} --replace'.format(cluster_id)
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_mirror_cmd)

        self.show_step(6)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7])

        self.show_step(7)
        plugin_name = 'detach_haproxy'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        plugin_name = 'external_loadbalancer'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name), msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.show_step(8)
        self.show_step(9)
        self.show_step(10)
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        nodegroup3 = NODEGROUPS[2]['name']

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': [['controller'], nodegroup1],
                'slave-02': [['controller'], nodegroup1],
                'slave-04': [['compute', 'cinder'], nodegroup2],
                'slave-05': [['controller'], nodegroup2],
                'slave-03': [['compute', 'cinder'], nodegroup1],
                'slave-07': [['standalone-haproxy'], nodegroup3]
            })

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        self.fuel_web.deploy_cluster_wait(cluster_id,
                                          timeout=180 * 60,
                                          check_services=False)

        self.env.make_snapshot('separate_haproxy')
    def separate_rabbit_service(self):
        """Deploy cluster with 3 separate rabbit roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with rabbit role
            4. Add 1 compute and cinder
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_rabbit_service
        """
        self.check_run("separate_rabbit_service")
        checkers.check_plugin_path_env(
            var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH))

        data = {
            'tenant': 'separaterabbit',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-rabbitmq'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-rabbitmq'],
                'slave-05': ['standalone-rabbitmq'],
                'slave-06': ['standalone-rabbitmq'],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_rabbit_service", is_make=True)
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_V3_PATH',
            plugin_path=EXAMPLE_PLUGIN_V3_PATH
        )

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=EXAMPLE_PLUGIN_V3_PATH,
            tar_target='/var'
        )
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True}
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check if fuel_plugin_example_v3_puppet called
        # between netconfig and connectivity_tests
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str,
                          plugin_str,
                          connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that the service runs on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd
        )
        process_count = len(res_pgrep['stdout'])
        assert_equal(1, process_count,
                     "There should be 1 process 'fuel-simple-service',"
                     " but {0} found {1} processes".format(cmd, process_count))

        # curl to service
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd_curl
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
Exemple #36
    def deploy_neutron_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_neutron_example_ha

        """
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(
                    0, res_pgrep['exit_code'], 'Failed with error {0} '
                    'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(
                    1, len(res_pgrep['stdout']),
                    'Failed with error {0} on the '
                    'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(
                    0, res_curl['exit_code'], 'Failed with error {0} '
                    'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha")
    def deploy_neutron_example_ha_add_node(self):
        """Deploy and scale cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Add 2 nodes with controller role
            11. Deploy cluster
            12. Check plugin health
            13. Run OSTF

        Duration 150m
        Snapshot deploy_neutron_example_ha_add_node

        """
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_PATH',
            plugin_path=EXAMPLE_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=EXAMPLE_PLUGIN_PATH,
            tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['tun'],
                "propagate_task_deploy": True
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check if service ran on controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-04', 'slave-05'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'

            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))

        # add verification here
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha_add_node")
    def separate_haproxy(self):
        """Deploy HA environment with separate Haproxy.

        Scenario:
            1. Revert snapshot with ready master node
            2. Copy and install external-lb and detach-haproxy plugins
            3. Bootstrap 3 slaves from default nodegroup
            4. Create cluster with Neutron VXLAN and custom nodegroups
            5. Run 'fuel-mirror' to replace cluster repositories
               with local mirrors
            6. Bootstrap 2 slave nodes from second nodegroup
               and one node from third node group
            7. Enable plugins for cluster
            8. Add 2 controllers from default nodegroup and 1 controller
               from second node group
            9. Add 1 compute+cinder from default node group
               and 1 compute+cinder from second node group
            10. Add haproxy node from third node group
            11. Verify networks
            12. Deploy cluster

        Duration 120m
        Snapshot separate_haproxy
        """

        if not MULTIPLE_NETWORKS:
            raise exceptions.FuelQAVariableNotSet(
                'MULTIPLE_NETWORKS', 'true')

        self.show_step(1)
        self.env.revert_snapshot('ready')

        self.show_step(2)
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH,
            tar_target="/var")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=SEPARATE_SERVICE_BALANCER_PLUGIN_PATH,
            tar_target="/var")

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                SEPARATE_SERVICE_HAPROXY_PLUGIN_PATH))

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                SEPARATE_SERVICE_BALANCER_PLUGIN_PATH))

        self.show_step(3)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
        self.show_step(4)
        admin_ip = self.ssh_manager.admin_ip
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                'net_provider': NEUTRON,
                'net_segment_type': NEUTRON_SEGMENT['tun'],
                'tenant': 'separatehaproxy',
                'user': '******',
                'password': '******',
                'ntp_list': [admin_ip],
            }
        )
        self.show_step(5)
        if MIRROR_UBUNTU != '':
            ubuntu_url = MIRROR_UBUNTU.split()[1]
            replace_cmd = \
                "sed -i 's,http://archive.ubuntu.com/ubuntu,{0},g'" \
                " /usr/share/fuel-mirror/ubuntu.yaml".format(
                    ubuntu_url)
            self.ssh_manager.execute_on_remote(ip=admin_ip,
                                               cmd=replace_cmd)
        create_mirror_cmd = 'fuel-mirror create -P ubuntu -G mos ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_mirror_cmd)
        apply_mirror_cmd = 'fuel-mirror apply -P ubuntu -G mos ubuntu ' \
                           '--env {0} --replace'.format(cluster_id)
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_mirror_cmd)

        self.show_step(6)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:7])

        self.show_step(7)
        plugin_name = 'detach_haproxy'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        plugin_name = 'external_loadbalancer'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.show_step(8)
        self.show_step(9)
        self.show_step(10)
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        nodegroup3 = NODEGROUPS[2]['name']

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': [['controller'], nodegroup1],
                'slave-02': [['controller'], nodegroup1],
                'slave-04': [['compute', 'cinder'], nodegroup2],
                'slave-05': [['controller'], nodegroup2],
                'slave-03': [['compute', 'cinder'], nodegroup1],
                'slave-07': [['standalone-haproxy'], nodegroup3]
            }
        )

        self.show_step(11)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(12)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60,
                                          check_services=False)

        self.env.make_snapshot('separate_haproxy')
    def deploy_murano_with_glare_ha_one_controller(self):
        """Deploy cluster in ha mode with murano plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Add 1 node with murano role
            8. Deploy the cluster
            9. Run network verification
            10. Run sanity OSTF
            11. Run Murano Platform OSTF

        Duration 150m
        Snapshot deploy_murano_with_glare_ha_one_controller
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.MURANO_PLUGIN_PATH,
            tar_target="/var")
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.MURANO_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            configure_ssl=False
        )

        self.setup_murano_plugin(cluster_id, glare=True)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["compute"],
                "slave-03": ["cinder"],
                "slave-04": ["murano-node"]
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['sanity'])

        logger.debug('Run OSTF platform tests')
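        # run_single_ostf_test expects fully qualified test names, so the loop
        # below builds '<module>.<class>.<test_case>' strings for each case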

        test_class_main = ('fuel_health.tests.tests_platform'
                           '.test_murano_linux.MuranoDeployLinuxServicesTests')
        tests_names = ['test_deploy_dummy_app_with_glare']

        test_classes = []

        for test_name in tests_names:
            test_classes.append('{0}.{1}'.format(test_class_main,
                                                 test_name))

        for test_name in test_classes:
            self.fuel_web.run_single_ostf_test(
                cluster_id=cluster_id, test_sets=['tests_platform'],
                test_name=test_name, timeout=60 * 20)

        self.env.make_snapshot("deploy_murano_with_glare_ha_one_controller")

    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_V3_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_V3_PATH)

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_V3_PATH,
                             tar_target='/var')
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True})

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that fuel_plugin_example_v3_puppet was called
        # between netconfig and connectivity_tests
        # (a standalone sketch of this ordering check follows this example)
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str, plugin_str, connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that the plugin's service is running on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.ssh_manager.execute_on_remote(ip=slave3['ip'],
                                                       cmd=cmd)
        process_count = len(res_pgrep['stdout'])
        assert_equal(
            1, process_count,
            "There should be 1 process 'fuel-simple-service',"
            " but {0} found {1} processes".format(cmd, process_count))

        # curl to service
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(ip=slave3['ip'], cmd=cmd_curl)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
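
The puppet-log ordering assertion used above (checkers.check_log_lines_order) can be approximated in a few lines of plain Python. The sketch below is an illustrative stand-in rather than the fuel-qa implementation, and it assumes the log file is readable locally instead of over SSH.

def check_lines_order(log_path, patterns):
    """Assert that the patterns first occur in the log in the given order."""
    with open(log_path) as log:
        lines = log.readlines()
    last_index = -1
    for pattern in patterns:
        index = next(
            (i for i, line in enumerate(lines)
             if pattern in line and i > last_index),
            None)
        assert index is not None, \
            "'{0}' not found after line {1}".format(pattern, last_index + 1)
        last_index = index
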
    def deploy_neutron_lbaas_simple_reset_ready(self):
        """Deploy and re-deploy cluster in simple mode with LbaaS plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check health of lbaas agent on the node
            9. Create pool and vip
            10. Reset cluster
            11. Add 1 compute
            12. Re-deploy cluster
            13. Check health of lbaas agent on the node
            14. Create pool and vip
            15. Run OSTF

        Duration 65m
        Snapshot deploy_neutron_lbaas_simple_reset_ready

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=LBAAS_PLUGIN_PATH,
            tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(LBAAS_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_SIMPLE,
        )

        plugin_name = 'lbaas'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        logger.debug('lbaas plugin is present in cluster attributes')
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        cluster = self.fuel_web.client.get_cluster(cluster_id)
        asserts.assert_equal(str(cluster['net_provider']), 'neutron')

        self.fuel_web.verify_network(cluster_id)

        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(controller['ip'])

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)

        self.fuel_web.stop_reset_env_wait(cluster_id)

        self.fuel_web.wait_nodes_get_online_state(
            self.env.d_env.nodes().slaves[:2])

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-03': ['compute'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.check_neutron_agents_statuses(os_conn)

        self.check_lbaas_work(os_conn)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_lbaas_simple_reset_ready")