def __init__(self):
        super(ExamplePluginPostDeploy, self).__init__()
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_V4_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_V4_PATH)

        self.__primary_controller = None
        self.__controllers = None
        self.__cluster_id = None
    def __init__(self):
        super(ExamplePluginPostDeploy, self).__init__()
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_V4_PATH',
            plugin_path=EXAMPLE_PLUGIN_V4_PATH
        )

        self.__primary_controller = None
        self.__controllers = None
        self.__cluster_id = None
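
For context, every example on this page guards test setup with check_plugin_path_env, which fails fast when the plugin tarball referenced through an environment variable is missing. A minimal sketch of such a checker (illustrative only; the real fuel-qa helper may differ):

import os

def check_plugin_path_env(var_name, plugin_path):
    # Hypothetical sketch: fail fast when the environment-driven setting
    # is unset or does not point to an existing plugin archive.
    assert plugin_path, '{0} is not set'.format(var_name)
    assert os.path.exists(plugin_path), (
        '{0}={1} does not point to an existing file'.format(
            var_name, plugin_path))
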
 def __init__(self):
     super(TestLmaCollectorPlugin, self).__init__()
     check_plugin_path_env(var_name='LMA_COLLECTOR_PLUGIN_PATH',
                           plugin_path=settings.LMA_COLLECTOR_PLUGIN_PATH)
     check_plugin_path_env(
         var_name='ELASTICSEARCH_KIBANA_PLUGIN_PATH',
         plugin_path=settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH)
     check_plugin_path_env(
         var_name='INFLUXDB_GRAFANA_PLUGIN_PATH',
         plugin_path=settings.INFLUXDB_GRAFANA_PLUGIN_PATH)
     check_plugin_path_env(
         var_name='LMA_INFRA_ALERTING_PLUGIN_PATH',
         plugin_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH)
 def __init__(self):
     super(TestLmaCollectorPlugin, self).__init__()
     check_plugin_path_env(
         var_name='LMA_COLLECTOR_PLUGIN_PATH',
         plugin_path=settings.LMA_COLLECTOR_PLUGIN_PATH
     )
     check_plugin_path_env(
         var_name='ELASTICSEARCH_KIBANA_PLUGIN_PATH',
         plugin_path=settings.ELASTICSEARCH_KIBANA_PLUGIN_PATH
     )
     check_plugin_path_env(
         var_name='INFLUXDB_GRAFANA_PLUGIN_PATH',
         plugin_path=settings.INFLUXDB_GRAFANA_PLUGIN_PATH
     )
     check_plugin_path_env(
         var_name='LMA_INFRA_ALERTING_PLUGIN_PATH',
         plugin_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH
     )
Example #5
 def __init__(self):
     super(BaseDeployPlatformComponents, self).__init__()
     check_plugin_path_env(
         var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
         plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH)
     check_plugin_path_env(
         var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH',
         plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH)
     check_plugin_path_env(
         var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH',
         plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH)
 def __init__(self):
     super(ContrailPlugin, self).__init__()
     check_plugin_path_env(
         var_name='CONTRAIL_PLUGIN_PATH',
         plugin_path=CONTRAIL_PLUGIN_PATH
     )
     check_plugin_path_env(
         var_name='CONTRAIL_PLUGIN_PACK_UB_PATH',
         plugin_path=CONTRAIL_PLUGIN_PACK_UB_PATH
     )
     check_plugin_path_env(
         var_name='CONTRAIL_PLUGIN_PACK_CEN_PATH',
         plugin_path=CONTRAIL_PLUGIN_PACK_CEN_PATH
     )
 def __init__(self):
     super(BaseDeployPlatformComponents, self).__init__()
     check_plugin_path_env(
         var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
         plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
     )
     check_plugin_path_env(
         var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH',
         plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH
     )
     check_plugin_path_env(
         var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH',
         plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH
     )
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_V3_PATH',
            plugin_path=EXAMPLE_PLUGIN_V3_PATH
        )

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=EXAMPLE_PLUGIN_V3_PATH,
            tar_target='/var'
        )
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True}
        )

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that fuel_plugin_example_v3_puppet is called
        # between netconfig and connectivity_tests
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str,
                          plugin_str,
                          connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that the service runs on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
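        # pgrep exits 0 when at least one process matches and prints one
        # PID per line, so a single stdout line means a single instance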
        res_pgrep = self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd
        )
        process_count = len(res_pgrep['stdout'])
        assert_equal(1, process_count,
                     "Expected 1 'fuel-simple-service' process, but '{0}' "
                     "found {1}".format(cmd, process_count))

        # curl to service
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(
            ip=slave3['ip'],
            cmd=cmd_curl
        )
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
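
The check_log_lines_order call above asserts that the plugin's deploy.pp ran between netconfig and the connectivity tests. A minimal local sketch of such an ordering check (illustrative names, not the fuel-qa implementation), assuming the log text has already been fetched:

def assert_lines_in_order(log_text, matchers):
    # Each matcher must be found after the match position of the
    # previous one, i.e. the lines appear in the given order.
    pos = 0
    for matcher in matchers:
        found = log_text.find(matcher, pos)
        assert found != -1, (
            'Line missing or out of order: {0}'.format(matcher))
        pos = found + len(matcher)

assert_lines_in_order(
    open('/var/log/puppet.log').read(),  # assumes a locally fetched copy
    ['MODULAR: netconfig/netconfig.pp',
     'PLUGIN: fuel_plugin_example_v3 - deploy.pp',
     'MODULAR: netconfig/connectivity_tests.pp'])
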
 def __init__(self):
     super(EMCPlugin, self).__init__()
     check_plugin_path_env(var_name='EMC_PLUGIN_PATH',
                           plugin_path=settings.EMC_PLUGIN_PATH)
    def deploy_ha_one_controller_neutron_example(self):
        """Deploy cluster with one controller and example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin health
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example
        """
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        segment_type = NEUTRON_SEGMENT['vlan']
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": segment_type,
                "propagate_task_deploy": True
            })

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        # check that the service is running on the controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example")
Example #11
    def deploy_neutron_example_ha(self):
        """Deploy cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 70m
        Snapshot deploy_neutron_example_ha

        """
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        for node in ('slave-01', 'slave-02', 'slave-03'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'
            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(
                    0, res_pgrep['exit_code'], 'Failed with error {0} '
                    'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(
                    1, len(res_pgrep['stdout']),
                    'Failed with error {0} on the '
                    'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(
                    0, res_curl['exit_code'], 'Failed with error {0} '
                    'on node {1}'.format(res_curl['stderr'], node))

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha")
 def __init__(self):
     super(ZabbixPlugin, self).__init__()
     check_plugin_path_env(var_name='ZABBIX_PLUGIN_PATH',
                           plugin_path=settings.ZABBIX_PLUGIN_PATH)
    def separate_keystone_ceph_service(self):
        """Deployment with separate keystone nodes and ceph for all

        Scenario:
            1. Install database and keystone plugins on the master node
            2. Create Ubuntu, Neutron VXLAN cluster with ceph for all storages
            3. Change ceph replication factor to 2
            4. Add 3 nodes with controller role
            5. Add 3 nodes with database+keystone role
            6. Add 1 compute node
            7. Add 2 ceph nodes
            8. Run network verification
            9. Deploy changes
            10. Run network verification
            11. Run OSTF tests

        Duration 120m
        Snapshot separate_keystone_ceph_service
        """
        self.check_run("separate_keystone_ceph_service")

        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH)
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH)
        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
                             tar_target="/var")

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_KEYSTONE_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
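            # osd_pool_size '2' below implements scenario step 3
            # (ceph replication factor of 2)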
            'osd_pool_size': '2',
            'tenant': 'separatekeystoneceph',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_names = ['detach-database', 'detach-keystone']
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        for plugin_name in plugin_names:
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                msg)
            options = {'metadata/enabled': True}
            self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database', 'standalone-keystone'],
                'slave-05': ['standalone-database', 'standalone-keystone'],
                'slave-06': ['standalone-database', 'standalone-keystone'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_keystone_ceph_service")
Example #14
 def __init__(self):
     super(TestElasticsearchPlugin, self).__init__()
     check_plugin_path_env(var_name='ELASTICSEARCH_KIBANA_PLUGIN_PATH',
                           plugin_path=ELASTICSEARCH_KIBANA_PLUGIN_PATH)
Example #15
 def __init__(self):
     super(TestInfluxdbPlugin, self).__init__()
     check_plugin_path_env(
         var_name='INFLUXDB_GRAFANA_PLUGIN_PATH',
         plugin_path=INFLUXDB_GRAFANA_PLUGIN_PATH
     )
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [settings.ZABBIX_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_PLUGIN_PATH]:
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=plugin,
                tar_target="/var")
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                # pgrep prints matching PIDs; empty output means the
                # service is not running
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
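        # OID .1.3.6.1.4.1.8072.2.3.0.1 is netSnmpExampleHeartbeatNotification
        # from the Net-SNMP examples MIB (enterprise 8072 is net-snmp),
        # i.e. a harmless test trap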
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0,
                 timeout_msg='SNMP heartbeat status not found '
                             'in /var/log/zabbix/zabbix_server.log')

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
 def __init__(self):
     super(LbaasPlugin, self).__init__()
     check_plugin_path_env(
         var_name='LBAAS_PLUGIN_PATH',
         plugin_path=LBAAS_PLUGIN_PATH
     )
Example #18
 def __init__(self):
     super(TestLmaInfraAlertingPlugin, self).__init__()
     check_plugin_path_env(
         var_name='LMA_INFRA_ALERTING_PLUGIN_PATH',
         plugin_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH)
    def deploy_zabbix_snmptrap_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check zabbix service in pacemaker
            11. Check login to zabbix dashboard
            12. Check SNMP services on controllers
            13. Check test SNMP trap

        Duration 70m
        Snapshot deploy_zabbix_snmptrap_ha

        """
        check_plugin_path_env(var_name='ZABBIX_SNMP_PLUGIN_PATH',
                              plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [
                settings.ZABBIX_PLUGIN_PATH, settings.ZABBIX_SNMP_PLUGIN_PATH
        ]:
            utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                                 tar_path=plugin,
                                 tar_target="/var")
            utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                            plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id)
        self.setup_snmp_plugin(cluster_id, snmp_community)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.check_zabbix_configuration(cluster_id, zabbix_username,
                                        zabbix_password)

        for node_name in ['slave-01', 'slave-02', 'slave-03']:
            with self.fuel_web.get_ssh_for_node(node_name) as remote:
                cmd = 'pgrep {0}'
                response = \
                    ''.join(remote.execute(cmd.format('snmptrapd'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptrapd'))
                response = \
                    ''.join(remote.execute(cmd.format('snmptt'))["stdout"])
                assert_not_equal(response.strip(), "",
                                 "Service {0} not started".format('snmptt'))

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_heartbeat_command = \
            ("snmptrap -v 2c -c {0} {1} '' .1.3.6.1.4.1.8072.2.3.0.1"
             .format(snmp_community, management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_heartbeat_command)

        mgmt_vip_devops_node = self.fuel_web.get_pacemaker_resource_location(
            'slave-01', 'vip__management')[0]
        mgmt_vip_nailgun_node = self.fuel_web.get_nailgun_node_by_devops_node(
            mgmt_vip_devops_node)

        with self.env.d_env.get_ssh_to_remote(
                mgmt_vip_nailgun_node['ip']) as remote:
            cmd = ('grep netSnmpExampleHeartbeatNotification '
                   '/var/log/zabbix/zabbix_server.log | '
                   'grep "Status Events"')

            wait(lambda: remote.execute(cmd)['exit_code'] == 0,
                 timeout_msg='SNMP heartbeat status not found '
                             'in /var/log/zabbix/zabbix_server.log')

        self.env.make_snapshot("deploy_zabbix_snmptrap_ha")
    def deploy_neutron_example_ha_add_node(self):
        """Deploy and scale cluster in ha mode with example plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Add 2 nodes with controller role
            11. Deploy cluster
            12. Check plugin health
            13. Run OSTF

        Duration 150m
        Snapshot deploy_neutron_example_ha_add_node

        """
        checkers.check_plugin_path_env(
            var_name='EXAMPLE_PLUGIN_PATH',
            plugin_path=EXAMPLE_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_PATH)

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=EXAMPLE_PLUGIN_PATH,
            tar_target='/var')

        # install plugin

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['tun'],
                "propagate_task_deploy": True
            }
        )

        plugin_name = 'fuel_plugin_example'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that the service is running on the controller
        logger.debug("Start to check service on node {0}".format('slave-01'))
        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            res_pgrep = remote.execute(cmd)
            assert_equal(0, res_pgrep['exit_code'],
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            assert_equal(1, len(res_pgrep['stdout']),
                         'Failed with error {0}'.format(res_pgrep['stderr']))
            # curl to service
            res_curl = remote.execute(cmd_curl)
            assert_equal(0, res_curl['exit_code'],
                         'Failed with error {0}'.format(res_curl['stderr']))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller'],
                'slave-05': ['controller'],
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        for node in ('slave-01', 'slave-04', 'slave-05'):
            logger.debug("Start to check service on node {0}".format(node))
            cmd_curl = 'curl localhost:8234'
            cmd = 'pgrep -f fuel-simple-service'

            with self.fuel_web.get_ssh_for_node(node) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))

        # final verification: run OSTF
        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("deploy_neutron_example_ha_add_node")
Example #21
    def separate_rabbit_service(self):
        """Deploy cluster with 3 separate rabbit roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with rabbit role
            4. Add 1 compute and 1 cinder node
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_rabbit_service
        """
        self.check_run("separate_rabbit_service")
        checkers.check_plugin_path_env(
            var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH))

        data = {
            'tenant': 'separaterabbit',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-rabbitmq'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-rabbitmq'],
                'slave-05': ['standalone-rabbitmq'],
                'slave-06': ['standalone-rabbitmq'],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_rabbit_service", is_make=True)
 def __init__(self):
     super(MuranoPlugin, self).__init__()
     check_plugin_path_env(
         var_name='MURANO_PLUGIN_PATH',
         plugin_path=settings.MURANO_PLUGIN_PATH
     )
Example #23
 def __init__(self):
     super(GlusterfsPlugin, self).__init__()
     check_plugin_path_env(
         var_name='GLUSTER_PLUGIN_PATH',
         plugin_path=GLUSTER_PLUGIN_PATH
     )
Example #24
 def __init__(self):
     super(EMCPlugin, self).__init__()
     check_plugin_path_env(
         var_name='EMC_PLUGIN_PATH',
         plugin_path=settings.EMC_PLUGIN_PATH
     )
Example #25
    def separate_db_ceph_service(self):
        """Deployment with separate db nodes and ceph for all

        Scenario:
            1. Install the plugin on the master node
            2. Create Ubuntu, Neutron VXLAN cluster with ceph for all storages
            3. Change ceph replication factor to 2
            4. Add 3 nodes with controller role
            5. Add 3 nodes with db role
            6. Add 1 compute node
            7. Add 2 ceph nodes
            8. Run network verification
            9. Deploy changes
            10. Run network verification
            11. Run OSTF tests

        Duration 120m
        Snapshot separate_db_ceph_service
        """
        self.check_run("separate_db_ceph_service")
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_DB_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_DB_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_DB_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'osd_pool_size': '2',
            'objects_ceph': True,
            'tenant': 'separatedbceph',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-database'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-database'],
                'slave-05': ['standalone-database'],
                'slave-06': ['standalone-database'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_db_ceph_service")
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH
        )
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_EXTREME_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH
        )
        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [settings.ZABBIX_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_PLUGIN_PATH,
                       settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH]:
            utils.upload_tarball(
                ip=self.ssh_manager.admin_ip,
                tar_path=plugin,
                tar_target="/var")
            utils.install_plugin_check_code(
                ip=self.ssh_manager.admin_ip,
                plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
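        # enterprise OID .1.3.6.1.4.1.1916 belongs to Extreme Networks; this
        # SNMPv1 trap simulates an Extreme switch power-supply failure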
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(
            zabbix_web, 'extreme', 'Power Supply Failed'),
            timeout_msg='Power Supply Failed event not found in Zabbix')

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
    def deploy_zabbix_snmp_extreme_ha(self):
        """Deploy cluster in ha mode with zabbix snmptrap plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugins
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Deploy the cluster
            8. Run network verification
            9. Run OSTF
            10. Check Extreme Switch trigger with test SNMP message

        Duration 70m
        Snapshot deploy_zabbix_snmp_extreme_ha

        """
        check_plugin_path_env(var_name='ZABBIX_SNMP_PLUGIN_PATH',
                              plugin_path=settings.ZABBIX_SNMP_PLUGIN_PATH)
        check_plugin_path_env(
            var_name='ZABBIX_SNMP_EXTREME_PLUGIN_PATH',
            plugin_path=settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH)
        self.env.revert_snapshot("ready_with_5_slaves")

        for plugin in [
                settings.ZABBIX_PLUGIN_PATH, settings.ZABBIX_SNMP_PLUGIN_PATH,
                settings.ZABBIX_SNMP_EXTREME_PLUGIN_PATH
        ]:
            utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                                 tar_path=plugin,
                                 tar_target="/var")
            utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                            plugin=os.path.basename(plugin))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        zabbix_username = '******'
        zabbix_password = '******'
        snmp_community = 'public'

        self.setup_zabbix_plugin(cluster_id, zabbix_username, zabbix_password)
        self.setup_snmp_plugin(cluster_id, snmp_community)
        self.setup_snmp_extreme_plugin(cluster_id)

        self.fuel_web.update_nodes(
            cluster_id, {
                "slave-01": ["controller"],
                "slave-02": ["controller"],
                "slave-03": ["controller"],
                "slave-04": ["compute"],
                "slave-05": ["cinder"]
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        management_vip = self.fuel_web.get_mgmt_vip(cluster_id)
        snmp_extreme_critical_command = \
            ("snmptrap -v 1 -c {snmp_community} {management_vip} "
             "'.1.3.6.1.4.1.1916' {management_vip} 6 10 '10' .1.3.6.1.4.1.1916"
             " s 'null' .1.3.6.1.4.1.1916 s 'null' .1.3.6.1.4.1.1916 s '2'"
             .format(snmp_community=snmp_community,
                     management_vip=management_vip))

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            remote.execute("apt-get install snmp -y")
            remote.execute(snmp_extreme_critical_command)

        public_vip = self.fuel_web.get_public_vip(cluster_id)
        zabbix_web = ZabbixWeb(public_vip, zabbix_username, zabbix_password)
        zabbix_web.login()

        wait(lambda: self.check_event_message(zabbix_web, 'extreme',
                                              'Power Supply Failed'),
             timeout_msg='Power Supply Failed event not found in Zabbix')

        self.env.make_snapshot("deploy_zabbix_snmp_extreme_ha")
Example #28
 def __init__(self):
     super(TestInfluxdbPlugin, self).__init__()
     check_plugin_path_env(var_name='INFLUXDB_GRAFANA_PLUGIN_PATH',
                           plugin_path=INFLUXDB_GRAFANA_PLUGIN_PATH)
 def __init__(self):
     super(ZabbixPlugin, self).__init__()
     check_plugin_path_env(
         var_name='ZABBIX_PLUGIN_PATH',
         plugin_path=settings.ZABBIX_PLUGIN_PATH
     )
Example #30
 def __init__(self):
     super(TestElasticsearchPlugin, self).__init__()
     check_plugin_path_env(
         var_name='ELASTICSEARCH_KIBANA_PLUGIN_PATH',
         plugin_path=ELASTICSEARCH_KIBANA_PLUGIN_PATH
     )
Example #31
 def __init__(self):
     super(GlusterfsPlugin, self).__init__()
     check_plugin_path_env(var_name='GLUSTER_PLUGIN_PATH',
                           plugin_path=GLUSTER_PLUGIN_PATH)
Example #32
    def separate_horizon_service(self):
        """Deploy cluster with 3 separate horizon roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with horizon role
            4. Add 1 compute node and 1 cinder node
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_horizon_service
        """
        self.check_run("separate_horizon_service")
        check_plugin_path_env(
            var_name='SEPARATE_SERVICE_HORIZON_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH)

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_HORIZON_PLUGIN_PATH))

        data = {
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '2',
            'tenant': 'separatehorizon',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-horizon'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-horizon'],
                'slave-05': ['standalone-horizon'],
                'slave-06': ['standalone-horizon'],
                'slave-07': ['compute'],
                'slave-08': ['ceph-osd'],
                'slave-09': ['ceph-osd']
            })

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("separate_horizon_service", is_make=True)
    def deploy_ha_one_controller_neutron_example_v3(self):
        """Deploy cluster with one controller and example plugin v3

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with custom role
            7. Deploy the cluster
            8. Run network verification
            9. Check plugin health
            10. Run OSTF

        Duration 35m
        Snapshot deploy_ha_one_controller_neutron_example_v3
        """
        self.check_run("deploy_ha_one_controller_neutron_example_v3")
        checkers.check_plugin_path_env(var_name='EXAMPLE_PLUGIN_V3_PATH',
                                       plugin_path=EXAMPLE_PLUGIN_V3_PATH)

        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node
        checkers.check_archive_type(EXAMPLE_PLUGIN_V3_PATH)
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=EXAMPLE_PLUGIN_V3_PATH,
                             tar_target='/var')
        # install plugin
        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(EXAMPLE_PLUGIN_V3_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={"propagate_task_deploy": True})

        plugin_name = 'fuel_plugin_example_v3'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['fuel_plugin_example_v3']
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)
        self.fuel_web.verify_network(cluster_id)

        # check that slave-01 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave1 = self.fuel_web.get_nailgun_node_by_name('slave-01')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave1['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave1['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that fuel_plugin_example_v3_puppet is called
        # between netconfig and connectivity_tests
        netconfig_str = 'MODULAR: netconfig/netconfig.pp'
        plugin_str = 'PLUGIN: fuel_plugin_example_v3 - deploy.pp'
        connect_str = 'MODULAR: netconfig/connectivity_tests.pp'
        checkers.check_log_lines_order(
            ip=slave1['ip'],
            log_file_path='/var/log/puppet.log',
            line_matcher=[netconfig_str, plugin_str, connect_str])

        # check that slave-02 contains
        # plugin+100.0.all
        # plugin+100.all
        slave2 = self.fuel_web.get_nailgun_node_by_name('slave-02')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave2['ip'], '/tmp/plugin+100.all')

        # check that slave-03 contains
        # plugin+100.0.all
        # plugin+100.all
        # fuel_plugin_example_v3_sh
        # fuel_plugin_example_v3_puppet
        slave3 = self.fuel_web.get_nailgun_node_by_name('slave-03')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.0.all')
        checkers.check_file_exists(slave3['ip'], '/tmp/plugin+100.all')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_sh')
        checkers.check_file_exists(slave3['ip'],
                                   '/tmp/fuel_plugin_example_v3_puppet')

        # check that the service runs on slave-03
        logger.debug("Checking service on node {0}".format('slave-03'))

        cmd = 'pgrep -f fuel-simple-service'
        res_pgrep = self.ssh_manager.execute_on_remote(ip=slave3['ip'],
                                                       cmd=cmd)
        process_count = len(res_pgrep['stdout'])
        assert_equal(
            1, process_count,
            "Expected 1 'fuel-simple-service' process, but '{0}' "
            "found {1}".format(cmd, process_count))

        # curl to service
        cmd_curl = 'curl localhost:8234'
        self.ssh_manager.execute_on_remote(ip=slave3['ip'], cmd=cmd_curl)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_one_controller_neutron_example_v3",
                               is_make=True)
 def __init__(self):
     super(TestLmaInfraAlertingPlugin, self).__init__()
     check_plugin_path_env(
         var_name='LMA_INFRA_ALERTING_PLUGIN_PATH',
         plugin_path=settings.LMA_INFRA_ALERTING_PLUGIN_PATH
     )
Example #35
 def __init__(self):
     super(MuranoPlugin, self).__init__()
     check_plugin_path_env(var_name='MURANO_PLUGIN_PATH',
                           plugin_path=settings.MURANO_PLUGIN_PATH)