class MapR401MRv2GatingTest(mapr_test.MapRGatingTest):
    mapr_config = cfg.ITConfig().mapr_401mrv2_config
    SKIP_MAP_REDUCE_TEST = mapr_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = mapr_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = mapr_config.SKIP_SCALING_TEST
    SKIP_CINDER_TEST = mapr_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = mapr_config.SKIP_EDP_TEST

    def get_plugin_config(self):
        return MapR401MRv2GatingTest.mapr_config

    def setUp(self):
        super(MapR401MRv2GatingTest, self).setUp()
        self._tt_name = 'nodemanager'
        self._mr_version = 2
        self._node_processes = [
            'NodeManager', 'ResourceManager', 'HistoryServer', 'FileServer',
            'CLDB', 'ZooKeeper', 'Oozie', 'Webserver'
        ]

    @testcase.skipIf(
        cfg.ITConfig().mapr_401mrv2_config.SKIP_ALL_TESTS_FOR_PLUGIN,
        "All tests for MapR plugin were skipped")
    @testcase.attr('mapr401mrv2')
    def test_mapr_plugin_gating(self):
        super(MapR401MRv2GatingTest, self).test_mapr_plugin_gating()
Example #2
class MapR311GatingTest(mapr_test.MapRGatingTest):
    mapr_config = cfg.ITConfig().mapr_311_config
    SKIP_MAP_REDUCE_TEST = mapr_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = mapr_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = mapr_config.SKIP_SCALING_TEST
    SKIP_CINDER_TEST = mapr_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = mapr_config.SKIP_EDP_TEST

    def get_plugin_config(self):
        return cfg.ITConfig().mapr_311_config

    def setUp(self):
        super(MapR311GatingTest, self).setUp()
        self._tt_name = 'tasktracker'
        self._mr_version = 1
        self._mkdir_cmd = 'sudo -u %(user)s hadoop fs -mkdir %(path)s'
        self._node_processes = [
            'TaskTracker',
            'JobTracker',
            'FileServer',
            'CLDB',
            'ZooKeeper',
            'Oozie',
            'Webserver'
        ]
        self._master_node_processes = [
            'Metrics',
            'Webserver',
            'ZooKeeper',
            'HTTPFS',
            'TaskTracker',
            'JobTracker',
            'Oozie',
            'FileServer',
            'CLDB',
        ]
        self._worker_node_processes = [
            'TaskTracker',
            'HiveServer2',
            'HiveMetastore',
            'FileServer',
        ]

    @testcase.skipIf(
        cfg.ITConfig().mapr_311_config.SKIP_ALL_TESTS_FOR_PLUGIN,
        "All tests for MapR plugin were skipped")
    @testcase.attr('mapr311')
    def test_mapr_plugin_gating(self):
        super(MapR311GatingTest, self).test_mapr_plugin_gating()
Example #3
class MapR402MRv1GatingTest(mapr_test.MapRGatingTest):
    mapr_config = cfg.ITConfig().mapr_402mrv1_config
    SKIP_MAP_REDUCE_TEST = mapr_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = mapr_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = mapr_config.SKIP_SCALING_TEST
    SKIP_CINDER_TEST = mapr_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = mapr_config.SKIP_EDP_TEST

    def get_plugin_config(self):
        return MapR402MRv1GatingTest.mapr_config

    def setUp(self):
        super(MapR402MRv1GatingTest, self).setUp()
        self._tt_name = 'tasktracker'
        self._mr_version = 1
        self._node_processes = [
            'TaskTracker', 'JobTracker', 'FileServer', 'CLDB', 'ZooKeeper',
            'Oozie', 'Webserver', 'Metrics', 'Sqoop2-Server', 'Sqoop2-Client',
            'Pig', 'Mahout', 'Hue', 'HTTPFS', 'HiveMetastore', 'HiveServer2',
            'Flume', 'Drill'
        ]
        self._master_node_processes = [
            'Flume',
            'Hue',
            'Metrics',
            'Webserver',
            'ZooKeeper',
            'HTTPFS',
            'TaskTracker',
            'JobTracker',
            'Oozie',
            'FileServer',
            'CLDB',
        ]
        self._worker_node_processes = [
            'TaskTracker',
            'HiveServer2',
            'HiveMetastore',
            'FileServer',
            'Sqoop2-Client',
            'Sqoop2-Server',
        ]

    @testcase.skipIf(
        cfg.ITConfig().mapr_402mrv1_config.SKIP_ALL_TESTS_FOR_PLUGIN,
        "All tests for MapR plugin were skipped")
    @testcase.attr('mapr402mrv1')
    def test_mapr_plugin_gating(self):
        super(MapR402MRv1GatingTest, self).test_mapr_plugin_gating()
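
The three MapR classes above share one gating pattern: a per-plugin config object supplies both the class-level SKIP_* flags and the skipIf guard on the single gating test. Below is a minimal self-contained sketch of that pattern; FakeConfig is hypothetical, standing in for the cfg.ITConfig() plugin config objects.

import testtools
from testtools import testcase


class FakeConfig(object):
    SKIP_ALL_TESTS_FOR_PLUGIN = True  # flip to False to let the test run


class GatingPatternExample(testtools.TestCase):
    @testcase.skipIf(FakeConfig.SKIP_ALL_TESTS_FOR_PLUGIN,
                     "All tests for the plugin were skipped")
    def test_plugin_gating(self):
        # real subclasses delegate to the shared gating run in the base class
        self.assertTrue(True)
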
Example #4
    def _prepare_test(self):
        self.spark_config = cfg.ITConfig().spark_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        self.spark_config.IMAGE_ID, self.spark_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.spark_config))
Example #5
    def _prepare_test(self):
        self.plugin_config = cfg.ITConfig().vanilla_two_config
        self.SKIP_EDP_TEST = self.plugin_config.SKIP_EDP_TEST
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        (self.plugin_config.IMAGE_ID,
         self.plugin_config.SSH_USERNAME) = (
             self.get_image_id_and_ssh_username(self.plugin_config))
Example #6
    def setUp(self):
        super(ITestCase, self).setUp()
        self.common_config = cfg.ITConfig().common_config
        self.plugin_config = self.get_plugin_config()
        self._setup_clients()
        self._setup_networks()
        self._setup_volume_params()
        self._setup_flavor()
        self._setup_ssh_access()

        self._image_id, self._ssh_username = (
            self.get_image_id_and_ssh_username())

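        # connectivity probe: fails fast if the Sahara API endpoint is unreachable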
        telnetlib.Telnet(self.common_config.SAHARA_HOST,
                         self.common_config.SAHARA_PORT)
Example #7
    def setUp(self):
        super(IDH3GatingTest, self).setUp()

        self.idh3_config = cfg.ITConfig().idh3_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = \
                self.get_floating_ip_pool_id_for_neutron_net()

        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []
        self.idh3_config.IMAGE_ID, self.idh3_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.idh3_config))
Example #8
    def _prepare_test(self):
        self.plugin_config = cfg.ITConfig().vanilla_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        self.plugin_config.IMAGE_ID, self.plugin_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.plugin_config))

        self.volumes_per_node = 0
        self.volumes_size = 0
        if not self.SKIP_CINDER_TEST:
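            # give each node two small volumes so the Cinder checks have storage to verify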
            self.volumes_per_node = 2
            self.volumes_size = 2
Example #9
File: base.py  Project: qinweiwei/sahara
    def setUp(self):
        super(ITestCase, self).setUp()
        self.common_config = cfg.ITConfig().common_config
        self.vanilla_config = cfg.ITConfig().vanilla_config
        self.vanilla_two_config = cfg.ITConfig().vanilla_two_config
        self.hdp_config = cfg.ITConfig().hdp_config
        self.idh2_config = cfg.ITConfig().idh2_config
        self.idh3_config = cfg.ITConfig().idh3_config

        telnetlib.Telnet(self.common_config.SAHARA_HOST,
                         self.common_config.SAHARA_PORT)

        self.sahara = sahara_client.Client(
            version=self.common_config.SAHARA_API_VERSION,
            username=self.common_config.OS_USERNAME,
            api_key=self.common_config.OS_PASSWORD,
            project_name=self.common_config.OS_TENANT_NAME,
            auth_url=self.common_config.OS_AUTH_URL,
            sahara_url='http://%s:%s/v%s/%s' %
            (self.common_config.SAHARA_HOST, self.common_config.SAHARA_PORT,
             self.common_config.SAHARA_API_VERSION,
             self.common_config.OS_TENANT_ID))

        self.nova = nova_client.Client(
            username=self.common_config.OS_USERNAME,
            api_key=self.common_config.OS_PASSWORD,
            project_id=self.common_config.OS_TENANT_NAME,
            auth_url=self.common_config.OS_AUTH_URL)

        self.neutron = neutron_client.Client(
            username=self.common_config.OS_USERNAME,
            password=self.common_config.OS_PASSWORD,
            tenant_name=self.common_config.OS_TENANT_NAME,
            auth_url=self.common_config.OS_AUTH_URL)

        if not self.common_config.FLAVOR_ID:
            self.flavor_id = self.nova.flavors.create(name='i-test-flavor-%s' %
                                                      str(uuid.uuid4())[:8],
                                                      ram=1024,
                                                      vcpus=1,
                                                      disk=10,
                                                      ephemeral=10).id

        else:
            self.flavor_id = self.common_config.FLAVOR_ID

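        # with no key file configured, create a throwaway Nova keypair (unique name suffix)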
        if not self.common_config.PATH_TO_SSH_KEY:
            self.common_config.USER_KEYPAIR_ID += str(uuid.uuid4())[:8]
            self.private_key = self.nova.keypairs.create(
                self.common_config.USER_KEYPAIR_ID).private_key

        else:
            self.private_key = open(self.common_config.PATH_TO_SSH_KEY).read()
Example #10
    def setUp(self):
        super(VanillaTwoGatingTest, self).setUp()

        self.vanilla_two_config = cfg.ITConfig().vanilla_two_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = \
                self.get_floating_ip_pool_id_for_neutron_net()

        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []
        (self.vanilla_two_config.IMAGE_ID,
         self.vanilla_two_config.SSH_USERNAME) = (
             self.get_image_id_and_ssh_username(self.vanilla_two_config))

        self.volumes_per_node = 0
        self.volume_size = 0
        if not self.SKIP_CINDER_TEST:
            self.volumes_per_node = 2
            self.volume_size = 2
Example #11
class VanillaTwoGatingTest(cluster_configs.ClusterConfigTest,
                           map_reduce.MapReduceTest, swift.SwiftTest,
                           scaling.ScalingTest, cinder.CinderVolumeTest,
                           edp.EDPTest):

    vanilla_two_config = cfg.ITConfig().vanilla_two_config
    SKIP_MAP_REDUCE_TEST = vanilla_two_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = vanilla_two_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = vanilla_two_config.SKIP_SCALING_TEST
    SKIP_CINDER_TEST = vanilla_two_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = vanilla_two_config.SKIP_EDP_TEST

    def setUp(self):
        super(VanillaTwoGatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []

    def _prepare_test(self):
        self.vanilla_two_config = cfg.ITConfig().vanilla_two_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        (self.vanilla_two_config.IMAGE_ID,
         self.vanilla_two_config.SSH_USERNAME) = (
             self.get_image_id_and_ssh_username(self.vanilla_two_config))

        self.volumes_per_node = 0
        self.volumes_size = 0
        if not self.SKIP_CINDER_TEST:
            self.volumes_per_node = 2
            self.volumes_size = 2

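    # modest YARN/MapReduce memory settings, presumably sized for the small test flavors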
    ng_params = {
        'MapReduce': {
            'yarn.app.mapreduce.am.resource.mb': 256,
            'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
        },
        'YARN': {
            'yarn.scheduler.minimum-allocation-mb': 256,
            'yarn.scheduler.maximum-allocation-mb': 1024,
            'yarn.nodemanager.vmem-check-enabled': False
        }
    }

    @b.errormsg("Failure while 'nm-dn' node group template creation: ")
    def _create_nm_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-nm-dn',
            'plugin_config': self.vanilla_two_config,
            'description': 'test node group template for Vanilla plugin',
            'node_processes': ['nodemanager', 'datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': self.ng_params
        }
        self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_dn_id)

    @b.errormsg("Failure while 'nm' node group template creation: ")
    def _create_nm_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-nm',
            'plugin_config': self.vanilla_two_config,
            'description': 'test node group template for Vanilla plugin',
            'volumes_per_node': self.volumes_per_node,
            'volumes_size': self.volumes_size,
            'node_processes': ['nodemanager'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': self.ng_params
        }
        self.ng_tmpl_nm_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_id)

    @b.errormsg("Failure while 'dn' node group template creation: ")
    def _create_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-dn',
            'plugin_config': self.vanilla_two_config,
            'description': 'test node group template for Vanilla plugin',
            'volumes_per_node': self.volumes_per_node,
            'volumes_size': self.volumes_size,
            'node_processes': ['datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': self.ng_params
        }
        self.ng_tmpl_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_dn_id)

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-vanilla',
            'plugin_config': self.vanilla_two_config,
            'description': 'test cluster template for Vanilla plugin',
            'cluster_configs': {
                'HDFS': {
                    'dfs.replication': 1
                }
            },
            'node_groups': [{
                'name': 'master-node-rm-nn',
                'flavor_id': self.flavor_id,
                'node_processes': ['namenode', 'resourcemanager'],
                'floating_ip_pool': self.floating_ip_pool,
                'count': 1,
                'node_configs': self.ng_params
            }, {
                'name': 'master-node-oo-hs',
                'flavor_id': self.flavor_id,
                'node_processes': ['oozie', 'historyserver',
                                   'secondarynamenode'],
                'floating_ip_pool': self.floating_ip_pool,
                'count': 1,
                'node_configs': self.ng_params
            }, {
                'name': 'worker-node-nm-dn',
                'node_group_template_id': self.ng_tmpl_nm_dn_id,
                'count': 2
            }, {
                'name': 'worker-node-dn',
                'node_group_template_id': self.ng_tmpl_dn_id,
                'count': 1
            }, {
                'name': 'worker-node-nm',
                'node_group_template_id': self.ng_tmpl_nm_id,
                'count': 1
            }],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s-v2' % (self.common_config.CLUSTER_NAME,
                                     self.vanilla_two_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.vanilla_two_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        cluster_id = self.create_cluster(**cluster)
        self.poll_cluster_state(cluster_id)
        self.cluster_info = self.get_cluster_info(self.vanilla_two_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.vanilla_two_config)

    @b.errormsg("Failure while Cinder testing: ")
    def _check_cinder(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing: ")
    def _check_mapreduce(self):
        self.map_reduce_testing(self.cluster_info)

    @b.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self.poll_jobs_status(list(self._run_edp_tests()))

    def _run_edp_tests(self):
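        # each yielded value is a job id; _check_edp gathers them and polls via poll_jobs_status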
        skipped_edp_job_types = self.vanilla_two_config.SKIP_EDP_JOB_TYPES

        if utils_edp.JOB_TYPE_PIG not in skipped_edp_job_types:
            yield self._edp_pig_test()
        if utils_edp.JOB_TYPE_MAPREDUCE not in skipped_edp_job_types:
            yield self._edp_mapreduce_test()
        if utils_edp.JOB_TYPE_MAPREDUCE_STREAMING not in skipped_edp_job_types:
            yield self._edp_mapreduce_streaming_test()
        if utils_edp.JOB_TYPE_JAVA not in skipped_edp_job_types:
            yield self._edp_java_test()

    def _edp_pig_test(self):
        pig_job = self.edp_info.read_pig_example_script()
        pig_lib = self.edp_info.read_pig_example_jar()

        return self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                                job_data_list=[{
                                    'pig': pig_job
                                }],
                                lib_data_list=[{
                                    'jar': pig_lib
                                }],
                                swift_binaries=True,
                                hdfs_local_output=True)

    def _edp_mapreduce_test(self):
        mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
        mapreduce_configs = self.edp_info.mapreduce_example_configs()
        return self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                                job_data_list=[],
                                lib_data_list=[{
                                    'jar': mapreduce_jar
                                }],
                                configs=mapreduce_configs,
                                swift_binaries=True,
                                hdfs_local_output=True)

    def _edp_mapreduce_streaming_test(self):
        return self.edp_testing(
            job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
            job_data_list=[],
            lib_data_list=[],
            configs=self.edp_info.mapreduce_streaming_configs())

    def _edp_java_test(self):
        java_jar = self.edp_info.read_java_example_lib(2)
        java_configs = self.edp_info.java_example_configs(2)
        return self.edp_testing(utils_edp.JOB_TYPE_JAVA,
                                job_data_list=[],
                                lib_data_list=[{
                                    'jar': java_jar
                                }],
                                configs=java_configs)

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        change_list = [{
            'operation': 'resize',
            'info': ['worker-node-nm-dn', 1]
        }, {
            'operation': 'resize',
            'info': ['worker-node-dn', 0]
        }, {
            'operation': 'resize',
            'info': ['worker-node-nm', 0]
        }, {
            'operation': 'add',
            'info': ['new-worker-node-nm', 1, '%s' % self.ng_tmpl_nm_id]
        }, {
            'operation': 'add',
            'info': ['new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id]
        }]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.vanilla_two_config)

    @b.errormsg("Failure while Cinder testing after cluster scaling: ")
    def _check_cinder_after_scaling(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing after cluster scaling: ")
    def _check_mapreduce_after_scaling(self):
        self.map_reduce_testing(self.cluster_info)

    @b.errormsg(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        self._check_edp()

    @testcase.skipIf(
        cfg.ITConfig().vanilla_two_config.SKIP_ALL_TESTS_FOR_PLUGIN,
        "All tests for Vanilla plugin were skipped")
    @testcase.attr('vanilla2')
    def test_vanilla_two_plugin_gating(self):
        self._prepare_test()
        self._create_nm_dn_ng_template()
        self._create_nm_ng_template()
        self._create_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_cinder()
        self._check_mapreduce()
        self._check_swift()
        self._check_edp()

        if not self.vanilla_two_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_cinder_after_scaling()
            self._check_mapreduce_after_scaling()
            self._check_swift_after_scaling()
            self._check_edp_after_scaling()

    def tearDown(self):
        self.delete_objects(self.cluster_id, self.cluster_template_id,
                            self.ng_template_ids)
        super(VanillaTwoGatingTest, self).tearDown()
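
Every step method above is wrapped with @b.errormsg(...), whose definition is not included in this listing. A plausible minimal sketch of such a decorator, assuming oslo's excutils and the print_error_log helper that Example #12 below uses directly:

import functools

from oslo_utils import excutils  # assumption: the tests may import this from elsewhere


def errormsg(message):
    # Hypothetical reconstruction: prefix any failure in a test step with a
    # step-specific message, then re-raise the original exception unchanged.
    def decorator(fct):
        @functools.wraps(fct)
        def wrapper(self, *args, **kwargs):
            try:
                return fct(self, *args, **kwargs)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.print_error_log(message, e)
        return wrapper
    return decorator
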
Example #12
class VanillaGatingTest(cinder.CinderVolumeTest,
                        cluster_configs.ClusterConfigTest,
                        map_reduce.MapReduceTest, swift.SwiftTest,
                        scaling.ScalingTest,
                        vanilla_transient_cluster.TransientClusterTest):
    config = cfg.ITConfig().vanilla_config
    SKIP_CINDER_TEST = config.SKIP_CINDER_TEST
    SKIP_CLUSTER_CONFIG_TEST = config.SKIP_CLUSTER_CONFIG_TEST
    SKIP_EDP_TEST = config.SKIP_EDP_TEST
    SKIP_MAP_REDUCE_TEST = config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = config.SKIP_SCALING_TEST
    SKIP_TRANSIENT_CLUSTER_TEST = config.SKIP_TRANSIENT_CLUSTER_TEST

    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for Vanilla plugin were skipped')
    @testcase.attr('vanilla1', 'transient')
    def test_vanilla_plugin_gating(self):
        self.vanilla_config.IMAGE_ID, self.vanilla_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.vanilla_config))

        # Default value of self.common_config.FLOATING_IP_POOL is None
        floating_ip_pool = self.common_config.FLOATING_IP_POOL
        internal_neutron_net = None
        # If Neutron enabled then get ID of floating IP pool and ID of internal
        # Neutron network
        if self.common_config.NEUTRON_ENABLED:
            floating_ip_pool = self.get_floating_ip_pool_id_for_neutron_net()
            internal_neutron_net = self.get_internal_neutron_net_id()

#----------------------------TRANSIENT CLUSTER TESTING-------------------------

        try:
            self.transient_cluster_testing(self.vanilla_config,
                                           floating_ip_pool,
                                           internal_neutron_net)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                message = 'Failure while transient cluster testing: '
                self.print_error_log(message, e)

        if self.vanilla_config.ONLY_TRANSIENT_CLUSTER_TEST:
            return

#-------------------------------CLUSTER CREATION-------------------------------

#---------------------"tt-dn" node group template creation---------------------

        node_group_template_id_list = []

        try:
            node_group_template_tt_dn_id = self.create_node_group_template(
                name='test-node-group-template-vanilla-tt-dn',
                plugin_config=self.vanilla_config,
                description='test node group template for Vanilla plugin',
                node_processes=['tasktracker', 'datanode'],
                node_configs={
                    'HDFS': cluster_configs.DN_CONFIG,
                    'MapReduce': cluster_configs.TT_CONFIG
                },
                floating_ip_pool=floating_ip_pool)
            node_group_template_id_list.append(node_group_template_tt_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                message = 'Failure while \'tt-dn\' node group ' \
                          'template creation: '
                self.print_error_log(message, e)

#-----------------------"tt" node group template creation----------------------

        if not self.vanilla_config.SKIP_CINDER_TEST:
            volumes_per_node = 2
            volume_size = 2
        else:
            volumes_per_node = 0
            volume_size = 0

        try:
            node_group_template_tt_id = self.create_node_group_template(
                name='test-node-group-template-vanilla-tt',
                plugin_config=self.vanilla_config,
                description='test node group template for Vanilla plugin',
                volumes_per_node=volumes_per_node,
                volume_size=volume_size,
                node_processes=['tasktracker'],
                node_configs={'MapReduce': cluster_configs.TT_CONFIG},
                floating_ip_pool=floating_ip_pool)
            node_group_template_id_list.append(node_group_template_tt_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list)
                message = 'Failure while \'tt\' node group template creation: '
                self.print_error_log(message, e)

#----------------------"dn" node group template creation-----------------------

        try:
            node_group_template_dn_id = self.create_node_group_template(
                name='test-node-group-template-vanilla-dn',
                plugin_config=self.vanilla_config,
                description='test node group template for Vanilla plugin',
                volumes_per_node=volumes_per_node,
                volume_size=volume_size,
                node_processes=['datanode'],
                node_configs={'HDFS': cluster_configs.DN_CONFIG},
                floating_ip_pool=floating_ip_pool)
            node_group_template_id_list.append(node_group_template_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list)
                message = 'Failure while \'dn\' node group template creation: '
                self.print_error_log(message, e)

#---------------------------Cluster template creation--------------------------

        try:
            cluster_template_id = self.create_cluster_template(
                name='test-cluster-template-vanilla',
                plugin_config=self.vanilla_config,
                description='test cluster template for Vanilla plugin',
                cluster_configs={
                    'HDFS': cluster_configs.CLUSTER_HDFS_CONFIG,
                    'MapReduce': cluster_configs.CLUSTER_MR_CONFIG,
                    'general': {
                        'Enable Swift': True
                    }
                },
                node_groups=[
                    dict(name='master-node-jt-nn',
                         flavor_id=self.flavor_id,
                         node_processes=['namenode', 'jobtracker'],
                         node_configs={
                             'HDFS': cluster_configs.NN_CONFIG,
                             'MapReduce': cluster_configs.JT_CONFIG
                         },
                         floating_ip_pool=floating_ip_pool,
                         count=1),
                    dict(
                        name='master-node-sec-nn-oz',
                        flavor_id=self.flavor_id,
                        node_processes=['secondarynamenode', 'oozie'],
                        node_configs={'JobFlow': cluster_configs.OOZIE_CONFIG},
                        floating_ip_pool=floating_ip_pool,
                        count=1),
                    dict(name='worker-node-tt-dn',
                         node_group_template_id=node_group_template_tt_dn_id,
                         count=3),
                    dict(name='worker-node-dn',
                         node_group_template_id=node_group_template_dn_id,
                         count=1),
                    dict(name='worker-node-tt',
                         node_group_template_id=node_group_template_tt_id,
                         count=1)
                ],
                net_id=internal_neutron_net)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list)
                message = 'Failure while cluster template creation: '
                self.print_error_log(message, e)

#-------------------------------Cluster creation-------------------------------

        try:
            cluster_name = "%s-%s-v1" % (self.common_config.CLUSTER_NAME,
                                         self.vanilla_config.PLUGIN_NAME)
            self.create_cluster(name=cluster_name,
                                plugin_config=self.vanilla_config,
                                cluster_template_id=cluster_template_id,
                                description='test cluster',
                                cluster_configs={})

            cluster_info = self.get_cluster_info(self.vanilla_config)
            self.await_active_workers_for_namenode(cluster_info['node_info'],
                                                   self.vanilla_config)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(self.cluster_id, cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while cluster creation: '
                self.print_error_log(message, e)

#---------------------------------CINDER TESTING-------------------------------

        try:
            self.cinder_volume_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while Cinder testing: '
                self.print_error_log(message, e)

#----------------------------CLUSTER CONFIG TESTING----------------------------

        try:
            self.cluster_config_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while cluster config testing: '
                self.print_error_log(message, e)

#----------------------------------EDP TESTING---------------------------------

        path = 'sahara/tests/integration/tests/resources/'
        pig_job_data = open(path + 'edp-job.pig').read()
        pig_lib_data = open(path + 'edp-lib.jar').read()
        mapreduce_jar_data = open(path + 'edp-mapreduce.jar').read()

        # This is a modified version of WordCount that takes swift configs
        java_lib_data = open(path + 'edp-java/edp-java.jar').read()
        java_configs = {
            "configs": {
                "edp.java.main_class":
                "org.openstack.sahara.examples.WordCount"
            }
        }

        mapreduce_configs = {
            "configs": {
                "mapred.mapper.class": "org.apache.oozie.example.SampleMapper",
                "mapred.reducer.class":
                "org.apache.oozie.example.SampleReducer"
            }
        }
        mapreduce_streaming_configs = {
            "configs": {
                "edp.streaming.mapper": "/bin/cat",
                "edp.streaming.reducer": "/usr/bin/wc"
            }
        }
        try:
            self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                             job_data_list=[{
                                 'pig': pig_job_data
                             }],
                             lib_data_list=[{
                                 'jar': pig_lib_data
                             }],
                             swift_binaries=True,
                             hdfs_local_output=True)
            self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                             job_data_list=[],
                             lib_data_list=[{
                                 'jar': mapreduce_jar_data
                             }],
                             configs=mapreduce_configs,
                             swift_binaries=True,
                             hdfs_local_output=True)
            self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                             job_data_list=[],
                             lib_data_list=[],
                             configs=mapreduce_streaming_configs)
            self.edp_testing(job_type=utils_edp.JOB_TYPE_JAVA,
                             job_data_list=[],
                             lib_data_list=[{
                                 'jar': java_lib_data
                             }],
                             configs=java_configs,
                             pass_input_output_args=True)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while EDP testing: '
                self.print_error_log(message, e)

#------------------------------MAP REDUCE TESTING------------------------------

        try:
            self.map_reduce_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while Map Reduce testing: '
                self.print_error_log(message, e)

#---------------------------CHECK SWIFT AVAILABILITY---------------------------

        try:
            self.check_swift_availability(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure during check of Swift availability: '
                self.print_error_log(message, e)

#--------------------------------CLUSTER SCALING-------------------------------

        if not self.vanilla_config.SKIP_SCALING_TEST:
            change_list = [{
                'operation': 'resize',
                'info': ['worker-node-tt-dn', 4]
            }, {
                'operation': 'resize',
                'info': ['worker-node-dn', 0]
            }, {
                'operation': 'resize',
                'info': ['worker-node-tt', 0]
            }, {
                'operation': 'add',
                'info': ['new-worker-node-tt', 1, node_group_template_tt_id]
            }, {
                'operation': 'add',
                'info': ['new-worker-node-dn', 1, node_group_template_dn_id]
            }]
            try:
                new_cluster_info = self.cluster_scaling(
                    cluster_info, change_list)
                self.await_active_workers_for_namenode(
                    new_cluster_info['node_info'], self.vanilla_config)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure while cluster scaling: '
                    self.print_error_log(message, e)

#--------------------------CINDER TESTING AFTER SCALING------------------------

            try:
                self.cinder_volume_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure while Cinder testing after cluster ' \
                              'scaling: '
                    self.print_error_log(message, e)

#---------------------CLUSTER CONFIG TESTING AFTER SCALING---------------------

            try:
                self.cluster_config_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure while cluster config testing after ' \
                              'cluster scaling: '
                    self.print_error_log(message, e)

#-----------------------MAP REDUCE TESTING AFTER SCALING-----------------------

            try:
                self.map_reduce_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure while Map Reduce testing after ' \
                              'cluster scaling: '
                    self.print_error_log(message, e)

#--------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------

            try:
                self.check_swift_availability(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure during check of Swift availability ' \
                              'after cluster scaling: '
                    self.print_error_log(message, e)

#----------------------------DELETE CREATED OBJECTS----------------------------

        self.delete_objects(cluster_info['cluster_id'], cluster_template_id,
                            node_group_template_id_list)
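
The change_list passed to cluster_scaling in these examples uses two entry shapes: 'resize' carries [existing node group name, new instance count], while 'add' carries [new node group name, instance count, node group template id]. A small illustrative checker for that structure (not part of the test base):

def validate_change_list(change_list):
    # 'resize' -> [existing node group name, new instance count]
    # 'add'    -> [new node group name, instance count, node group template id]
    for change in change_list:
        op, info = change['operation'], change['info']
        if op == 'resize':
            assert len(info) == 2 and isinstance(info[1], int)
        elif op == 'add':
            assert len(info) == 3
        else:
            raise ValueError('unknown scaling operation: %s' % op)
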
Example #13
    def get_plugin_config(self):
        return cfg.ITConfig().cdh_config
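
This one-line override works because the shared setUp in Example #6 resolves plugin settings through an overridable hook (self.plugin_config = self.get_plugin_config()). Reduced to its essentials, the collaboration looks like this sketch:

class PluginConfigHookSketch(object):
    # base-class behaviour from Example #6, reduced to the relevant lines
    def setUp(self):
        self.plugin_config = self.get_plugin_config()  # subclass hook

    def get_plugin_config(self):
        raise NotImplementedError  # each gating test returns its own plugin config
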
Example #14
class CDHGatingTest(cluster_configs.ClusterConfigTest,
                    map_reduce.MapReduceTest, swift.SwiftTest,
                    scaling.ScalingTest, cinder.CinderVolumeTest, edp.EDPTest):

    cdh_config = cfg.ITConfig().cdh_config
    SKIP_MAP_REDUCE_TEST = cdh_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = cdh_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = cdh_config.SKIP_SCALING_TEST
    SKIP_CINDER_TEST = cdh_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = cdh_config.SKIP_EDP_TEST

    def setUp(self):
        super(CDHGatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []

    def get_plugin_config(self):
        return cfg.ITConfig().cdh_config

    @b.errormsg("Failure while 'nm-dn' node group template creation: ")
    def _create_nm_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-cdh-nm-dn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for CDH plugin',
            'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE'],
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_dn_id)

    @b.errormsg("Failure while 'nm' node group template creation: ")
    def _create_nm_ng_template(self):
        template = {
            'name': 'test-node-group-template-cdh-nm',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for CDH plugin',
            'volumes_per_node': self.volumes_per_node,
            'volumes_size': self.volumes_size,
            'node_processes': ['YARN_NODEMANAGER'],
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_nm_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_id)

    @b.errormsg("Failure while 'dn' node group template creation: ")
    def _create_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-cdh-dn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for CDH plugin',
            'volumes_per_node': self.volumes_per_node,
            'volumes_size': self.volumes_size,
            'node_processes': ['HDFS_DATANODE'],
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_dn_id)

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        cl_config = {
            'general': {
                'CDH5 repo list URL': self.plugin_config.CDH_REPO_LIST_URL,
                'CM5 repo list URL': self.plugin_config.CM_REPO_LIST_URL,
                'CDH5 repo key URL (for debian-based only)':
                self.plugin_config.CDH_APT_KEY_URL,
                'CM5 repo key URL (for debian-based only)':
                self.plugin_config.CM_APT_KEY_URL,
                'Enable Swift': True
            }
        }
        template = {
            'name': 'test-cluster-template-cdh',
            'plugin_config': self.plugin_config,
            'description': 'test cluster template for CDH plugin',
            'cluster_configs': cl_config,
            'node_groups': [{
                'name': 'manager-node',
                'flavor_id': self.plugin_config.MANAGERNODE_FLAVOR,
                'node_processes': ['CLOUDERA_MANAGER'],
                'floating_ip_pool': self.floating_ip_pool,
                'auto_security_group': True,
                'count': 1
            }, {
                'name': 'master-node-rm-nn',
                'flavor_id': self.plugin_config.MANAGERNODE_FLAVOR,
                'node_processes': ['HDFS_NAMENODE', 'YARN_RESOURCEMANAGER'],
                'floating_ip_pool': self.floating_ip_pool,
                'auto_security_group': True,
                'count': 1
            }, {
                'name': 'master-node-oo-hs-snn-hm-hs2',
                'flavor_id': self.plugin_config.MANAGERNODE_FLAVOR,
                'node_processes': [
                    'OOZIE_SERVER', 'YARN_JOBHISTORY',
                    'HDFS_SECONDARYNAMENODE', 'HIVE_METASTORE', 'HIVE_SERVER2'
                ],
                'floating_ip_pool': self.floating_ip_pool,
                'auto_security_group': True,
                'count': 1
            }, {
                'name': 'worker-node-nm-dn',
                'node_group_template_id': self.ng_tmpl_nm_dn_id,
                'count': 2
            }, {
                'name': 'worker-node-dn',
                'node_group_template_id': self.ng_tmpl_dn_id,
                'count': 1
            }, {
                'name': 'worker-node-nm',
                'node_group_template_id': self.ng_tmpl_nm_id,
                'count': 1
            }],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s' % (self.common_config.CLUSTER_NAME,
                                  self.plugin_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.plugin_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {
                'HDFS': {
                    'dfs_replication': 1
                }
            }
        }
        self.cluster_id = self.create_cluster(**cluster)
        self.poll_cluster_state(self.cluster_id)
        self.cluster_info = self.get_cluster_info(self.plugin_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.plugin_config)

    @b.errormsg("Failure while Cinder testing: ")
    def _check_cinder(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing: ")
    def _check_mapreduce(self):
        self.map_reduce_testing(self.cluster_info, check_log=False)

    @b.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self.poll_jobs_status(list(self._run_edp_test()))

    def _run_edp_test(self):
        # check pig
        pig_job = self.edp_info.read_pig_example_script()
        pig_lib = self.edp_info.read_pig_example_jar()
        yield self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                               job_data_list=[{
                                   'pig': pig_job
                               }],
                               lib_data_list=[{
                                   'jar': pig_lib
                               }],
                               swift_binaries=False,
                               hdfs_local_output=True)

        # check mapreduce
        mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
        mapreduce_configs = self.edp_info.mapreduce_example_configs()
        yield self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                               job_data_list=[],
                               lib_data_list=[{
                                   'jar': mapreduce_jar
                               }],
                               configs=mapreduce_configs,
                               swift_binaries=False,
                               hdfs_local_output=True)

        # check mapreduce streaming
        yield self.edp_testing(
            job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
            job_data_list=[],
            lib_data_list=[],
            configs=self.edp_info.mapreduce_streaming_configs(),
            swift_binaries=False,
            hdfs_local_output=True)

        # check hive
        yield self.check_edp_hive()

        # check Java
        java_jar = self.edp_info.read_java_example_lib(2)
        java_configs = self.edp_info.java_example_configs(2)
        yield self.edp_testing(utils_edp.JOB_TYPE_JAVA,
                               job_data_list=[],
                               lib_data_list=[{
                                   'jar': java_jar
                               }],
                               configs=java_configs)

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        change_list = [{
            'operation': 'resize',
            'info': ['worker-node-nm-dn', 1]
        }, {
            'operation': 'resize',
            'info': ['worker-node-dn', 0]
        }, {
            'operation': 'resize',
            'info': ['worker-node-nm', 0]
        }, {
            'operation': 'add',
            'info': ['new-worker-node-nm', 1, '%s' % self.ng_tmpl_nm_id]
        }, {
            'operation': 'add',
            'info': ['new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id]
        }]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.plugin_config)

    @b.errormsg("Failure while Cinder testing after cluster scaling: ")
    def _check_cinder_after_scaling(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing after cluster scaling: ")
    def _check_mapreduce_after_scaling(self):
        self.map_reduce_testing(self.cluster_info, check_log=False)

    @b.errormsg(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        self._check_edp()

    @testcase.skipIf(cfg.ITConfig().cdh_config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     "All tests for CDH plugin were skipped")
    @testcase.attr('cdh')
    def test_cdh_plugin_gating(self):
        self._success = False
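        # _success stays False if any step below raises; tearDown then dumps the Cloudera Manager log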
        self._create_nm_dn_ng_template()
        self._create_nm_ng_template()
        self._create_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_cinder()
        self._check_mapreduce()
        self._check_swift()
        self._check_edp()

        if not self.plugin_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_cinder_after_scaling()
            self._check_edp_after_scaling()

        self._success = True

    def print_manager_log(self):
        if not self.cluster_id:
            return

        manager_node = None
        for ng in self.sahara.clusters.get(self.cluster_id).node_groups:
            if 'CLOUDERA_MANAGER' in ng['node_processes']:
                manager_node = ng['instances'][0]['management_ip']
                break

        if not manager_node:
            print("Cloudera Manager node not found")
            return

        self.open_ssh_connection(manager_node)
        try:
            log = self.execute_command('sudo cat /var/log/cloudera-scm-server/'
                                       'cloudera-scm-server.log')[1]
        finally:
            self.close_ssh_connection()

        print("\n\nCLOUDERA MANAGER LOGS\n\n")
        print(log)
        print("\n\nEND OF CLOUDERA MANAGER LOGS\n\n")

    def tearDown(self):
        if not self._success:
            self.print_manager_log()

        self.delete_objects(self.cluster_id, self.cluster_template_id,
                            self.ng_template_ids)
        super(CDHGatingTest, self).tearDown()
Example #15
class Mapr4_1GatingTest(swift.SwiftTest, scaling.ScalingTest,
                        edp.EDPTest):

    config = cfg.ITConfig().mapr4_1_config
    SKIP_EDP_TEST = config.SKIP_EDP_TEST
    SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = config.SKIP_SCALING_TEST

    def setUp(self):
        super(Mapr4_1GatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None

    def _prepare_test(self):
        self.mapr4_1_config = cfg.ITConfig().mapr4_1_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        self.mapr4_1_config.IMAGE_ID, self.mapr4_1_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.mapr4_1_config))

    @b.errormsg("Failure while 'jt-nn' node group template creation: ")
    def _create_jt_nn_ng_template(self):
        template = {
            'name': 'test-node-group-template-mapr4_1-jt-nn',
            'plugin_config': self.mapr4_1_config,
            'description': 'test node group template for MAPR plugin',
            # NEED CHANGES MASTER_NODE
            'node_processes': self.mapr4_1_config.MASTER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_jt_nn_id = self.create_node_group_template(**template)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_jt_nn_id])

    @b.errormsg("Failure while 'nm-dn' node group template creation: ")
    def _create_nm_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-mapr4_1-nm-dn',
            'plugin_config': self.mapr4_1_config,
            'description': 'test node group template for MAPR plugin',
            # NEED CHANGES WORKER
            'node_processes': self.mapr4_1_config.WORKER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_nm_dn_id])

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-mapr4_1',
            'plugin_config': self.mapr4_1_config,
            'description': 'test cluster template for MAPR plugin',
            'cluster_configs': {
                'YARN': {
                    'yarn.log-aggregation-enable': False
                }
            },
            'node_groups': [
                {
                    'name': 'master-node-dn',
                    'node_group_template_id': self.ng_tmpl_jt_nn_id,
                    'count': 1
                },
                {
                    'name': 'worker-node-nm',
                    'node_group_template_id': self.ng_tmpl_nm_dn_id,
                    'count': 3
                }
            ],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)
        self.addCleanup(self.delete_objects,
                        cluster_template_id=self.cluster_template_id)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s-v2' % (self.common_config.CLUSTER_NAME,
                                     self.mapr4_1_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.mapr4_1_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        cluster_id = self.create_cluster(**cluster)
        self.addCleanup(self.delete_objects, cluster_id=cluster_id)
        self.poll_cluster_state(cluster_id)
        self.cluster_info = self.get_cluster_info(self.mapr4_1_config)
        self.await_active_tasktracker(
            self.cluster_info['node_info'], self.mapr4_1_config)

    @b.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self.poll_jobs_status(list(self._run_edp_tests()))

    def _run_edp_tests(self):
        skipped_edp_job_types = self.mapr4_1_config.SKIP_EDP_JOB_TYPES

        if utils_edp.JOB_TYPE_PIG not in skipped_edp_job_types:
            yield self._edp_pig_test()
        if utils_edp.JOB_TYPE_MAPREDUCE not in skipped_edp_job_types:
            yield self._edp_mapreduce_test()
        if utils_edp.JOB_TYPE_MAPREDUCE_STREAMING not in skipped_edp_job_types:
            yield self._edp_mapreduce_streaming_test()
        if utils_edp.JOB_TYPE_JAVA not in skipped_edp_job_types:
            yield self._edp_java_test()

    def _edp_pig_test(self):
        pig_job = self.edp_info.read_pig_example_script()
        pig_lib = self.edp_info.read_pig_example_jar()

        return self.edp_testing(
            job_type=utils_edp.JOB_TYPE_PIG,
            job_data_list=[{'pig': pig_job}],
            lib_data_list=[{'jar': pig_lib}],
            swift_binaries=True,
            hdfs_local_output=True)

    def _edp_mapreduce_test(self):
        mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
        mapreduce_configs = self.edp_info.mapreduce_example_configs()
        return self.edp_testing(
            job_type=utils_edp.JOB_TYPE_MAPREDUCE,
            job_data_list=[],
            lib_data_list=[{'jar': mapreduce_jar}],
            configs=mapreduce_configs,
            swift_binaries=True,
            hdfs_local_output=True)

    def _edp_mapreduce_streaming_test(self):
        return self.edp_testing(
            job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
            job_data_list=[],
            lib_data_list=[],
            configs=self.edp_info.mapreduce_streaming_configs())

    def _edp_java_test(self):
        java_jar = self.edp_info.read_java_example_lib(1)
        java_configs = self.edp_info.java_example_configs(1)
        return self.edp_testing(
            utils_edp.JOB_TYPE_JAVA,
            job_data_list=[],
            lib_data_list=[{'jar': java_jar}],
            configs=java_configs)

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        datanode_count_after_resizing = (
            self.cluster_info['node_info']['datanode_count']
            + self.mapr4_1_config.SCALE_EXISTING_NG_COUNT)
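        # Each scaling change is a dict: a 'resize' entry carries
        # [existing_group_name, new_instance_count], while an 'add' entry
        # carries [new_group_name, instance_count, node_group_template_id].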
        change_list = [
            {
                'operation': 'resize',
                'info': ['worker-node-nm',
                         datanode_count_after_resizing]
            },
            {
                'operation': 'add',
                'info': ['new-worker-node-tt-dn',
                         self.mapr4_1_config.SCALE_NEW_NG_COUNT,
                         '%s' % self.ng_tmpl_nm_dn_id]
            }
        ]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)
        self.await_active_tasktracker(
            self.cluster_info['node_info'], self.mapr4_1_config)

    @b.errormsg(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        self._check_edp()

    @testcase.attr('mapr4_1')
    def test_mapr4_1_plugin_gating(self):
        self._prepare_test()
        self._create_jt_nn_ng_template()
        self._create_nm_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_swift()
        self._check_edp()

        if not self.mapr4_1_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_swift_after_scaling()
            self._check_edp_after_scaling()
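
# The @b.errormsg decorator used above condenses the verbose try/except
# cleanup blocks that the older HDP and MapR tests below still spell out by
# hand. A minimal sketch of such a decorator, assuming it only prints a
# prefixed error log and re-raises (the real helper in
# sahara.tests.integration.tests.base may differ):
import functools

from oslo_utils import excutils


def errormsg(message):
    def decorator(fct):
        @functools.wraps(fct)
        def wrapper(self, *args, **kwargs):
            try:
                fct(self, *args, **kwargs)
            except Exception as e:
                # Log the prefixed message, then let the exception propagate.
                with excutils.save_and_reraise_exception():
                    self.print_error_log(message, e)
        return wrapper
    return decorator
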
class HDPGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
                    map_reduce.MapReduceTest, swift.SwiftTest,
                    scaling.ScalingTest):
    config = cfg.ITConfig().hdp_config
    SKIP_CINDER_TEST = config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = config.SKIP_EDP_TEST
    SKIP_MAP_REDUCE_TEST = config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = config.SKIP_SCALING_TEST

    def get_plugin_config(self):
        return cfg.ITConfig().hdp_config

    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for HDP plugin were skipped')
    @testcase.attr('hdp1')
    def test_hdp_plugin_gating(self):
        node_group_template_id_list = []
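        # Collect created template ids so every failure path below can hand
        # them to delete_objects before re-raising.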

# ------------------------------CLUSTER CREATION-------------------------------

# ----------------------"tt-dn" node group template creation-------------------

        try:
            node_group_template_tt_dn_id = self.create_node_group_template(
                name='test-node-group-template-hdp-tt-dn',
                plugin_config=self.plugin_config,
                description='test node group template for HDP plugin',
                volumes_per_node=self.volumes_per_node,
                volumes_size=self.volumes_size,
                node_processes=self.plugin_config.WORKER_NODE_PROCESSES,
                node_configs={},
                floating_ip_pool=self.floating_ip_pool,
                auto_security_group=True
            )
            node_group_template_id_list.append(node_group_template_tt_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                message = ('Failure while \'tt-dn\' node group '
                           'template creation: ')
                self.print_error_log(message, e)

# --------------------------Cluster template creation--------------------------

        try:
            cluster_template_id = self.create_cluster_template(
                name='test-cluster-template-hdp',
                plugin_config=self.plugin_config,
                description='test cluster template for HDP plugin',
                cluster_configs={},
                node_groups=[
                    dict(
                        name='master-node-jt-nn',
                        flavor_id=self.flavor_id,
                        node_processes=(
                            self.plugin_config.MASTER_NODE_PROCESSES),
                        node_configs={},
                        floating_ip_pool=self.floating_ip_pool,
                        count=1,
                        auto_security_group=True
                    ),
                    dict(
                        name='worker-node-tt-dn',
                        node_group_template_id=node_group_template_tt_dn_id,
                        count=3)
                ],
                net_id=self.internal_neutron_net
            )

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while cluster template creation: '
                self.print_error_log(message, e)

# ------------------------------Cluster creation-------------------------------

        cluster_name = (self.common_config.CLUSTER_NAME + '-' +
                        self.plugin_config.PLUGIN_NAME)
        try:
            cluster_id = self.create_cluster(
                name=cluster_name,
                plugin_config=self.plugin_config,
                cluster_template_id=cluster_template_id,
                description='test cluster',
                cluster_configs={}
            )
            self.poll_cluster_state(cluster_id)

            cluster_info = self.get_cluster_info(self.plugin_config)
            self.await_active_workers_for_namenode(cluster_info['node_info'],
                                                   self.plugin_config)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    self.cluster_id, cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while cluster creation: '
                self.print_error_log(message, e)

# --------------------------------CINDER TESTING-------------------------------

        try:
            self.cinder_volume_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while Cinder testing: '
                self.print_error_log(message, e)

# ---------------------------------EDP TESTING---------------------------------

        pig_job_data = self.edp_info.read_pig_example_script()
        pig_lib_data = self.edp_info.read_pig_example_jar()

        mapreduce_jar_data = self.edp_info.read_mapreduce_example_jar()

        # This is a modified version of WordCount that takes swift configs
        java_lib_data = self.edp_info.read_java_example_lib()

        try:
            job_ids = []
            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_PIG,
                job_data_list=[{'pig': pig_job_data}],
                lib_data_list=[{'jar': pig_lib_data}],
                swift_binaries=True,
                hdfs_local_output=True)
            job_ids.append(job_id)

            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                job_data_list=[],
                lib_data_list=[{'jar': mapreduce_jar_data}],
                configs=self.edp_info.mapreduce_example_configs(),
                swift_binaries=True,
                hdfs_local_output=True)
            job_ids.append(job_id)

            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                job_data_list=[],
                lib_data_list=[],
                configs=self.edp_info.mapreduce_streaming_configs())
            job_ids.append(job_id)

            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_JAVA,
                job_data_list=[],
                lib_data_list=[{'jar': java_lib_data}],
                configs=self.edp_info.java_example_configs(),
                pass_input_output_args=True)
            job_ids.append(job_id)
            self.poll_jobs_status(job_ids)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while EDP testing: '
                self.print_error_log(message, e)

# -----------------------------MAP REDUCE TESTING------------------------------

        try:
            self.map_reduce_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while Map Reduce testing: '
                self.print_error_log(message, e)

# --------------------------CHECK SWIFT AVAILABILITY---------------------------

        try:
            self.check_swift_availability(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure during check of Swift availability: '
                self.print_error_log(message, e)

# -------------------------------CLUSTER SCALING-------------------------------

        if not self.plugin_config.SKIP_SCALING_TEST:
            datanode_count_after_resizing = (
                cluster_info['node_info']['datanode_count']
                + self.plugin_config.SCALE_EXISTING_NG_COUNT)
            change_list = [
                {
                    'operation': 'resize',
                    'info': ['worker-node-tt-dn',
                             datanode_count_after_resizing]
                },
                {
                    'operation': 'add',
                    'info': [
                        'new-worker-node-tt-dn',
                        self.plugin_config.SCALE_NEW_NG_COUNT,
                        '%s' % node_group_template_tt_dn_id
                    ]
                }
            ]
            try:
                new_cluster_info = self.cluster_scaling(cluster_info,
                                                        change_list)
                self.await_active_workers_for_namenode(
                    new_cluster_info['node_info'], self.plugin_config)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = 'Failure while cluster scaling: '
                    self.print_error_log(message, e)

# -------------------------CINDER TESTING AFTER SCALING------------------------

            try:
                self.cinder_volume_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure while Cinder testing after cluster '
                               'scaling: ')
                    self.print_error_log(message, e)

# ----------------------MAP REDUCE TESTING AFTER SCALING-----------------------

            try:
                self.map_reduce_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure while Map Reduce testing after '
                               'cluster scaling: ')
                    self.print_error_log(message, e)

# -------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------

            try:
                self.check_swift_availability(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure during check of Swift availability '
                               'after cluster scaling: ')
                    self.print_error_log(message, e)

# ---------------------------DELETE CREATED OBJECTS----------------------------

        self.delete_objects(
            cluster_info['cluster_id'], cluster_template_id,
            node_group_template_id_list
        )
# Example #17
class VanillaGatingTest(cinder.CinderVolumeTest,
                        cluster_configs.ClusterConfigTest,
                        map_reduce.MapReduceTest, swift.SwiftTest,
                        scaling.ScalingTest, edp.EDPTest):
    config = cfg.ITConfig().vanilla_config
    SKIP_CINDER_TEST = config.SKIP_CINDER_TEST
    SKIP_CLUSTER_CONFIG_TEST = config.SKIP_CLUSTER_CONFIG_TEST
    SKIP_EDP_TEST = config.SKIP_EDP_TEST
    SKIP_MAP_REDUCE_TEST = config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = config.SKIP_SCALING_TEST

    def _prepare_test(self):
        self.plugin_config = cfg.ITConfig().vanilla_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        self.plugin_config.IMAGE_ID, self.plugin_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.plugin_config))

        self.volumes_per_node = 0
        self.volumes_size = 0
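        # Attach Cinder volumes (two per node, size 2) only when the Cinder
        # test will actually run.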
        if not self.SKIP_CINDER_TEST:
            self.volumes_per_node = 2
            self.volumes_size = 2

    @b.errormsg("Failure while 'tt-dn' node group template creation: ")
    def _create_tt_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-tt-dn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for Vanilla 1 plugin',
            'node_processes': ['tasktracker', 'datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {
                'HDFS': cluster_configs.DN_CONFIG,
                'MapReduce': cluster_configs.TT_CONFIG
            }
        }
        self.ng_tmpl_tt_dn_id = self.create_node_group_template(**template)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_tt_dn_id])

    @b.errormsg("Failure while 'tt' node group template creation: ")
    def _create_tt_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-tt',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for Vanilla 1 plugin',
            'volumes_per_node': self.volumes_per_node,
            'volumes_size': self.volumes_size,
            'node_processes': ['tasktracker'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {
                'MapReduce': cluster_configs.TT_CONFIG
            }
        }
        self.ng_tmpl_tt_id = self.create_node_group_template(**template)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_tt_id])

    @b.errormsg("Failure while 'dn' node group template creation: ")
    def _create_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-dn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for Vanilla 1 plugin',
            'volumes_per_node': self.volumes_per_node,
            'volumes_size': self.volumes_size,
            'node_processes': ['datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {
                'HDFS': cluster_configs.DN_CONFIG
            }
        }
        self.ng_tmpl_dn_id = self.create_node_group_template(**template)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_dn_id])

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-vanilla',
            'plugin_config': self.plugin_config,
            'description': 'test cluster template for Vanilla 1 plugin',
            'net_id': self.internal_neutron_net,
            'cluster_configs': {
                'HDFS': cluster_configs.CLUSTER_HDFS_CONFIG,
                'MapReduce': cluster_configs.CLUSTER_MR_CONFIG,
                'general': {
                    'Enable Swift': True
                }
            },
            'node_groups': [
                {
                    'name': 'master-node-jt-nn',
                    'flavor_id': self.flavor_id,
                    'node_processes': ['namenode', 'jobtracker'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'node_configs': {
                        'HDFS': cluster_configs.NN_CONFIG,
                        'MapReduce': cluster_configs.JT_CONFIG
                    },
                    'count': 1
                },
                {
                    'name': 'master-node-sec-nn-oz',
                    'flavor_id': self.flavor_id,
                    'node_processes': ['secondarynamenode', 'oozie'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'node_configs': {
                        'HDFS': cluster_configs.SNN_CONFIG,
                        'JobFlow': cluster_configs.OOZIE_CONFIG
                    },
                    'count': 1
                },
                {
                    'name': 'worker-node-tt-dn',
                    'node_group_template_id': self.ng_tmpl_tt_dn_id,
                    'count': 2
                },
                {
                    'name': 'worker-node-tt',
                    'node_group_template_id': self.ng_tmpl_tt_id,
                    'count': 1
                },
                {
                    'name': 'worker-node-dn',
                    'node_group_template_id': self.ng_tmpl_dn_id,
                    'count': 1
                }
            ]
        }
        self.cluster_template_id = self.create_cluster_template(**template)
        self.addCleanup(self.delete_objects,
                        cluster_template_id=self.cluster_template_id)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s' % (self.common_config.CLUSTER_NAME,
                                  self.plugin_config.PLUGIN_NAME)
        kw = {
            'name': cluster_name,
            'plugin_config': self.plugin_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        cluster_id = self.create_cluster(**kw)
        self.addCleanup(self.delete_objects, cluster_id=cluster_id)
        self.poll_cluster_state(cluster_id)
        self.cluster_info = self.get_cluster_info(self.plugin_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.plugin_config)

    @b.errormsg("Failure while Cinder testing: ")
    def _check_cinder(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while cluster config testing: ")
    def _check_cluster_config(self):
        self.cluster_config_testing(self.cluster_info)

    def _run_edp_test(self):
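        # Each yield submits one EDP job and returns its id; _check_edp
        # gathers the ids and polls them for completion in a single pass.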
        pig_job_data = self.edp_info.read_pig_example_script()
        pig_lib_data = self.edp_info.read_pig_example_jar()
        mapreduce_jar_data = self.edp_info.read_mapreduce_example_jar()
        # This is a modified version of WordCount that takes swift configs
        java_lib_data = self.edp_info.read_java_example_lib()

        yield self.edp_testing(
            job_type=utils_edp.JOB_TYPE_PIG,
            job_data_list=[{'pig': pig_job_data}],
            lib_data_list=[{'jar': pig_lib_data}],
            configs=self.edp_info.pig_example_configs(),
            swift_binaries=True,
            hdfs_local_output=True)

        yield self.edp_testing(
            job_type=utils_edp.JOB_TYPE_MAPREDUCE,
            job_data_list=[],
            lib_data_list=[{'jar': mapreduce_jar_data}],
            configs=self.edp_info.mapreduce_example_configs(),
            swift_binaries=True,
            hdfs_local_output=True)

        yield self.edp_testing(
            job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
            job_data_list=[],
            lib_data_list=[],
            configs=self.edp_info.mapreduce_streaming_configs())

        yield self.edp_testing(
            job_type=utils_edp.JOB_TYPE_JAVA,
            job_data_list=[],
            lib_data_list=[{'jar': java_lib_data}],
            configs=self.edp_info.java_example_configs(),
            pass_input_output_args=True)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self.poll_jobs_status(list(self._run_edp_test()))

    @b.errormsg("Failure while MapReduce testing: ")
    def _check_mapreduce(self):
        self.map_reduce_testing(self.cluster_info)

    @b.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        change_list = [
            {
                'operation': 'resize',
                'info': ['worker-node-tt-dn', 1]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-dn', 0]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-tt', 0]
            },
            {
                'operation': 'add',
                'info': [
                    'new-worker-node-tt', 1, self.ng_tmpl_tt_id
                ]
            },
            {
                'operation': 'add',
                'info': [
                    'new-worker-node-dn', 1, self.ng_tmpl_dn_id
                ]
            }
        ]
        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.plugin_config)

    @b.errormsg("Failure while Cinder testing after cluster scaling: ")
    def _check_cinder_after_scaling(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while config testing after cluster scaling: ")
    def _check_cluster_config_after_scaling(self):
        self.cluster_config_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing after cluster scaling: ")
    def _check_mapreduce_after_scaling(self):
        self.map_reduce_testing(self.cluster_info)

    @b.errormsg("Failure during check of Swift availability after scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        self.poll_jobs_status(list(self._run_edp_test()))

    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for Vanilla plugin were skipped')
    @testcase.attr('vanilla1')
    def test_vanilla_plugin_gating(self):
        self._prepare_test()
        self._create_tt_dn_ng_template()
        self._create_tt_ng_template()
        self._create_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()
        self._check_cinder()
        self._check_cluster_config()
        self._check_edp()
        self._check_mapreduce()
        self._check_swift()
        if not self.plugin_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_cinder_after_scaling()
            self._check_cluster_config_after_scaling()
            self._check_mapreduce_after_scaling()
            self._check_swift_after_scaling()
            self._check_edp_after_scaling()
# Example #18
class SparkGatingTest(swift.SwiftTest, scaling.ScalingTest, edp.EDPTest):

    config = cfg.ITConfig().spark_config
    SKIP_EDP_TEST = config.SKIP_EDP_TEST

    def setUp(self):
        super(SparkGatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []

    def get_plugin_config(self):
        return cfg.ITConfig().spark_config

    @b.errormsg("Failure while 'm-nn' node group template creation: ")
    def _create_m_nn_ng_template(self):
        template = {
            'name': 'test-node-group-template-spark-m-nn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for Spark plugin',
            'node_processes': self.plugin_config.MASTER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_m_nn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_m_nn_id)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_m_nn_id])

    @b.errormsg("Failure while 's-dn' node group template creation: ")
    def _create_s_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-spark-s-dn',
            'plugin_config': self.plugin_config,
            'description': 'test node group template for Spark plugin',
            'node_processes': self.plugin_config.WORKER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_s_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_s_dn_id)
        self.addCleanup(self.delete_objects,
                        node_group_template_id_list=[self.ng_tmpl_s_dn_id])

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-spark',
            'plugin_config': self.plugin_config,
            'description': 'test cluster template for Spark plugin',
            'cluster_configs': {},
            'node_groups': [
                {
                    'name': 'master-node',
                    'node_group_template_id': self.ng_tmpl_m_nn_id,
                    'count': 1
                },
                {
                    'name': 'worker-node',
                    'node_group_template_id': self.ng_tmpl_s_dn_id,
                    'count': 1
                }
            ],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)
        self.addCleanup(self.delete_objects,
                        cluster_template_id=self.cluster_template_id)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s' % (self.common_config.CLUSTER_NAME,
                                  self.plugin_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.plugin_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        cluster_id = self.create_cluster(**cluster)
        self.addCleanup(self.delete_objects, cluster_id=cluster_id)
        self.poll_cluster_state(cluster_id)
        self.cluster_info = self.get_cluster_info(self.plugin_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.plugin_config)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self._edp_test()

    def _edp_test(self):
        # check spark
        spark_jar = self.edp_info.read_spark_example_jar()
        spark_configs = self.edp_info.spark_example_configs()
        job_id = self.edp_testing(utils_edp.JOB_TYPE_SPARK,
                                  job_data_list=[{'jar': spark_jar}],
                                  lib_data_list=[],
                                  configs=spark_configs)
        self.poll_jobs_status([job_id])

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
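        # Leave this blank until scaling is implemented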
        pass

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        # Leave this blank until scaling is implemented
        pass

    @testcase.attr('spark')
    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for Spark plugin were skipped')
    def test_spark_plugin_gating(self):

        self._create_m_nn_ng_template()
        self._create_s_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_edp()

        if not self.plugin_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_edp_after_scaling()

    def tearDown(self):
        super(SparkGatingTest, self).tearDown()
# Example #19
class MaprGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
                     map_reduce.MapReduceTest, swift.SwiftTest,
                     scaling.ScalingTest):
    config = cfg.ITConfig().mapr_config
    # The test body below refers to self.mapr_config, so alias it here.
    mapr_config = config
    SKIP_CINDER_TEST = config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = config.SKIP_EDP_TEST
    SKIP_MAP_REDUCE_TEST = config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = config.SKIP_SCALING_TEST

    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for MAPR plugin were skipped')
    @testcase.attr('mapr1')
    def test_mapr_plugin_gating(self):
        self.mapr_config.IMAGE_ID, self.mapr_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.mapr_config))

        # Default value of self.common_config.FLOATING_IP_POOL is None
        floating_ip_pool = self.common_config.FLOATING_IP_POOL
        internal_neutron_net = None
        # If Neutron enabled then get ID of floating IP pool and ID of internal
        # Neutron network
        if self.common_config.NEUTRON_ENABLED:
            floating_ip_pool = self.get_floating_ip_pool_id_for_neutron_net()
            internal_neutron_net = self.get_internal_neutron_net_id()

        if not self.mapr_config.SKIP_CINDER_TEST:
            volumes_per_node = 2
        else:
            volumes_per_node = 0

        node_group_template_id_list = []

# ------------------------------CLUSTER CREATION-------------------------------

# ----------------------"tt-dn" node group template creation-------------------

        try:
            node_group_template_tt_dn_id = self.create_node_group_template(
                name='test-node-group-template-mapr-tt-dn',
                plugin_config=self.mapr_config,
                description='test node group template for MAPR plugin',
                volumes_per_node=volumes_per_node,
                node_processes=self.mapr_config.WORKER_NODE_PROCESSES,
                # TODO: define WORKER_NODE_PROCESSES in the MapR config
                node_configs={},
                floating_ip_pool=floating_ip_pool
            )
            node_group_template_id_list.append(node_group_template_tt_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                message = ('Failure while \'tt-dn\' node group '
                           'template creation: ')
                self.print_error_log(message, e)

# --------------------------Cluster template creation--------------------------

        try:
            cluster_template_id = self.create_cluster_template(
                name='test-cluster-template-mapr',
                plugin_config=self.mapr_config,
                description='test cluster template for MAPR plugin',
                cluster_configs={},
                node_groups=[
                    dict(
                        name='master-node-jt-nn',
                        flavor_id=self.flavor_id,
                        node_processes=self.mapr_config.MASTER_NODE_PROCESSES,
                        # TODO: define MASTER_NODE_PROCESSES in the MapR
                        # config
                        node_configs={},
                        floating_ip_pool=floating_ip_pool,
                        count=1),
                    dict(
                        name='worker-node-tt-dn',
                        node_group_template_id=node_group_template_tt_dn_id,
                        count=3)
                ],
                net_id=internal_neutron_net
            )

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while cluster template creation: '
                self.print_error_log(message, e)

# ------------------------------Cluster creation-------------------------------

        cluster_name = (self.common_config.CLUSTER_NAME + '-' +
                        self.mapr_config.PLUGIN_NAME)
        try:
            self.create_cluster(
                name=cluster_name,
                plugin_config=self.mapr_config,
                cluster_template_id=cluster_template_id,
                description='test cluster',
                cluster_configs={}
            )

            cluster_info = self.get_cluster_info(self.mapr_config)
            self.await_active_tasktracker(
                cluster_info['node_info'], self.mapr_config)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    self.cluster_id, cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while cluster creation: '
                self.print_error_log(message, e)

# --------------------------------CINDER TESTING-------------------------------

        try:
            self.cinder_volume_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while Cinder testing: '
                self.print_error_log(message, e)

# ---------------------------------EDP TESTING---------------------------------

        path = 'sahara/tests/integration/tests/resources/'
        pig_job_data = open(path + 'edp-job.pig').read()
        pig_lib_data = open(path + 'edp-lib.jar').read()
        mapreduce_jar_data = open(path + 'edp-mapreduce.jar').read()

        # This is a modified version of WordCount that takes swift configs
        java_lib_data = open(path + 'edp-java/edp-java.jar').read()
        java_configs = {
            "configs": {
                "edp.java.main_class": ("org.openstack.sahara.examples"
                                        ".WordCount")
            }
        }

        mapreduce_configs = {
            "configs": {
                "mapred.mapper.class": "org.apache.oozie.example.SampleMapper",
                "mapred.reducer.class": ("org.apache.oozie.example"
                                         ".SampleReducer")
            }
        }
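        # Streaming jobs pipe records through shell commands: /bin/cat as the
        # mapper and /usr/bin/wc as the reducer make a trivial word count.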
        mapreduce_streaming_configs = {
            "configs": {
                "edp.streaming.mapper": "/bin/cat",
                "edp.streaming.reducer": "/usr/bin/wc"
            }
        }
        try:
            self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                             job_data_list=[{'pig': pig_job_data}],
                             lib_data_list=[{'jar': pig_lib_data}],
                             swift_binaries=True,
                             hdfs_local_output=True)
            self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                             job_data_list=[],
                             lib_data_list=[{'jar': mapreduce_jar_data}],
                             configs=mapreduce_configs,
                             swift_binaries=True,
                             hdfs_local_output=True)
            self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                             job_data_list=[],
                             lib_data_list=[],
                             configs=mapreduce_streaming_configs)
            self.edp_testing(job_type=utils_edp.JOB_TYPE_JAVA,
                             job_data_list=[],
                             lib_data_list=[{'jar': java_lib_data}],
                             configs=java_configs,
                             pass_input_output_args=True)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while EDP testing: '
                self.print_error_log(message, e)

# -----------------------------MAP REDUCE TESTING------------------------------

        try:
            self.map_reduce_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while Map Reduce testing: '
                self.print_error_log(message, e)

# --------------------------CHECK SWIFT AVAILABILITY---------------------------

        try:
            self.check_swift_availability(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure during check of Swift availability: '
                self.print_error_log(message, e)

# -------------------------------CLUSTER SCALING-------------------------------

        if not self.mapr_config.SKIP_SCALING_TEST:
            datanode_count_after_resizing = (
                cluster_info['node_info']['datanode_count']
                + self.mapr_config.SCALE_EXISTING_NG_COUNT)
            change_list = [
                {
                    'operation': 'resize',
                    'info': ['worker-node-tt-dn',
                             datanode_count_after_resizing]
                },
                {
                    'operation': 'add',
                    'info': [
                        'new-worker-node-tt-dn',
                        self.mapr_config.SCALE_NEW_NG_COUNT,
                        '%s' % node_group_template_tt_dn_id
                    ]
                }
            ]
            try:
                new_cluster_info = self.cluster_scaling(cluster_info,
                                                        change_list)
                self.await_active_tasktracker(
                    new_cluster_info['node_info'], self.mapr_config)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = 'Failure while cluster scaling: '
                    self.print_error_log(message, e)

# -------------------------CINDER TESTING AFTER SCALING------------------------

            try:
                self.cinder_volume_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure while Cinder testing after cluster '
                               'scaling: ')
                    self.print_error_log(message, e)

# ----------------------MAP REDUCE TESTING AFTER SCALING-----------------------

            try:
                self.map_reduce_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure while Map Reduce testing after '
                               'cluster scaling: ')
                    self.print_error_log(message, e)

# -------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------

            try:
                self.check_swift_availability(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure during check of Swift availability '
                               'after cluster scaling: ')
                    self.print_error_log(message, e)

# ---------------------------DELETE CREATED OBJECTS----------------------------

        self.delete_objects(
            cluster_info['cluster_id'], cluster_template_id,
            node_group_template_id_list
        )
# Example #20
    def get_plugin_config(self):
        return cfg.ITConfig().vanilla_config
# Example #21
    def get_plugin_config(self):
        return cfg.ITConfig().hdp2_config
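
# These one-method fragments are all a per-plugin subclass needs to override:
# the shared gating base class presumably uses get_plugin_config() to pick
# the matching section of cfg.ITConfig() for image, SSH and skip settings.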
# Example #22
class HDP2GatingTest(swift.SwiftTest, scaling.ScalingTest, edp.EDPTest):

    config = cfg.ITConfig().hdp2_config
    SKIP_EDP_TEST = config.SKIP_EDP_TEST
    SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = config.SKIP_SCALING_TEST

    def setUp(self):
        super(HDP2GatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []

    def _prepare_test(self):
        self.hdp2_config = cfg.ITConfig().hdp2_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        self.hdp2_config.IMAGE_ID, self.hdp2_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.hdp2_config))

    @b.errormsg("Failure while 'rm-nn' node group template creation: ")
    def _create_rm_nn_ng_template(self):
        template = {
            'name': 'test-node-group-template-hdp2-rm-nn',
            'plugin_config': self.hdp2_config,
            'description': 'test node group template for HDP plugin',
            'node_processes': self.hdp2_config.MASTER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            # TODO(sreshetniak): Enable auto security group when #1392738 is
            # resolved
            'auto_security_group': False,
            'node_configs': {}
        }
        self.ng_tmpl_rm_nn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_rm_nn_id)

    @b.errormsg("Failure while 'nm-dn' node group template creation: ")
    def _create_nm_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-hdp2-nm-dn',
            'plugin_config': self.hdp2_config,
            'description': 'test node group template for HDP plugin',
            'node_processes': self.hdp2_config.WORKER_NODE_PROCESSES,
            'floating_ip_pool': self.floating_ip_pool,
            'auto_security_group': True,
            'node_configs': {}
        }
        self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_dn_id)

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-hdp2',
            'plugin_config': self.hdp2_config,
            'description': 'test cluster template for HDP plugin',
            'cluster_configs': {
                'YARN': {
                    'yarn.log-aggregation-enable': False
                }
            },
            'node_groups': [
                {
                    'name': 'master-node-dn',
                    'node_group_template_id': self.ng_tmpl_rm_nn_id,
                    'count': 1
                },
                {
                    'name': 'worker-node-nm',
                    'node_group_template_id': self.ng_tmpl_nm_dn_id,
                    'count': 3
                }
            ],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s-v2' % (self.common_config.CLUSTER_NAME,
                                     self.hdp2_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.hdp2_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        cluster_id = self.create_cluster(**cluster)
        self.poll_cluster_state(cluster_id)
        self.cluster_info = self.get_cluster_info(self.hdp2_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.hdp2_config)

    @b.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self.poll_jobs_status(list(self._run_edp_test()))

    def _run_edp_test(self):
        # check pig
        pig_job = self.edp_info.read_pig_example_script()
        pig_lib = self.edp_info.read_pig_example_jar()

        yield self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                               job_data_list=[{'pig': pig_job}],
                               lib_data_list=[{'jar': pig_lib}],
                               swift_binaries=True,
                               hdfs_local_output=True)

        # check mapreduce
        mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
        mapreduce_configs = self.edp_info.mapreduce_example_configs()
        yield self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                               job_data_list=[],
                               lib_data_list=[{'jar': mapreduce_jar}],
                               configs=mapreduce_configs,
                               swift_binaries=True,
                               hdfs_local_output=True)

        # check mapreduce streaming
        yield self.edp_testing(
            job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
            job_data_list=[],
            lib_data_list=[],
            configs=self.edp_info.mapreduce_streaming_configs())

        # check java
        java_jar = self.edp_info.read_java_example_lib(2)
        java_configs = self.edp_info.java_example_configs(2)
        yield self.edp_testing(utils_edp.JOB_TYPE_JAVA,
                               job_data_list=[],
                               lib_data_list=[{'jar': java_jar}],
                               configs=java_configs)

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        datanode_count_after_resizing = (
            self.cluster_info['node_info']['datanode_count'] +
            self.hdp2_config.SCALE_EXISTING_NG_COUNT)
        change_list = [
            {
                'operation': 'resize',
                'info': ['worker-node-nm', datanode_count_after_resizing]
            },
            {
                'operation': 'add',
                'info': [
                    'new-worker-node-tt-dn',
                    self.hdp2_config.SCALE_NEW_NG_COUNT,
                    '%s' % self.ng_tmpl_nm_dn_id
                ]
            }
        ]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.hdp2_config)

    @b.errormsg(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        self._check_edp()

    @testcase.attr('hdp2')
    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for HDP2 plugin were skipped')
    def test_hdp2_plugin_gating(self):
        self._prepare_test()
        self._create_rm_nn_ng_template()
        self._create_nm_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_swift()
        self._check_edp()

        if not self.hdp2_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_swift_after_scaling()
            self._check_edp_after_scaling()

    def tearDown(self):
        self.delete_objects(self.cluster_id, self.cluster_template_id,
                            self.ng_template_ids)
        super(HDP2GatingTest, self).tearDown()
# Example #23
class VanillaTwoGatingTest(cluster_configs.ClusterConfigTest,
                           map_reduce.MapReduceTest, swift.SwiftTest,
                           scaling.ScalingTest, cinder.CinderVolumeTest,
                           edp.EDPTest):

    vanilla_two_config = cfg.ITConfig().vanilla_two_config
    SKIP_MAP_REDUCE_TEST = vanilla_two_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = vanilla_two_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = vanilla_two_config.SKIP_SCALING_TEST
    SKIP_CINDER_TEST = vanilla_two_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = vanilla_two_config.SKIP_EDP_TEST

    def setUp(self):
        super(VanillaTwoGatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []

    def _prepare_test(self):
        self.vanilla_two_config = cfg.ITConfig().vanilla_two_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        (self.vanilla_two_config.IMAGE_ID,
         self.vanilla_two_config.SSH_USERNAME) = (
             self.get_image_id_and_ssh_username(self.vanilla_two_config))

        self.volumes_per_node = 0
        self.volume_size = 0
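        # When the Cinder check is enabled, each volume-backed worker node
        # gets two 2 GB volumes; otherwise the node group templates below
        # request no attached storage.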
        if not self.SKIP_CINDER_TEST:
            self.volumes_per_node = 2
            self.volume_size = 2

    @b.errormsg("Failure while 'nm-dn' node group template creation: ")
    def _create_nm_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-nm-dn',
            'plugin_config': self.vanilla_two_config,
            'description': 'test node group template for Vanilla plugin',
            'node_processes': ['nodemanager', 'datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_dn_id)

    @b.errormsg("Failure while 'nm' node group template creation: ")
    def _create_nm_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-nm',
            'plugin_config': self.vanilla_two_config,
            'description': 'test node group template for Vanilla plugin',
            'volumes_per_node': self.volumes_per_node,
            'volume_size': self.volume_size,
            'node_processes': ['nodemanager'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_nm_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_id)

    @b.errormsg("Failure while 'dn' node group template creation: ")
    def _create_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-vanilla-dn',
            'plugin_config': self.vanilla_two_config,
            'description': 'test node group template for Vanilla plugin',
            'volumes_per_node': self.volumes_per_node,
            'volume_size': self.volume_size,
            'node_processes': ['datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_dn_id)

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
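        # dfs.replication is pinned to 1, presumably so HDFS stays writable
        # with the small datanode counts used here, including after the
        # scaling phase shrinks the worker groups.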
        template = {
            'name': 'test-cluster-template-vanilla',
            'plugin_config': self.vanilla_two_config,
            'description': 'test cluster template for Vanilla plugin',
            'cluster_configs': {
                'HDFS': {
                    'dfs.replication': 1
                }
            },
            'node_groups': [
                {
                    'name': 'master-node-rm-nn',
                    'flavor_id': self.flavor_id,
                    'node_processes': ['namenode', 'resourcemanager'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'count': 1
                },
                {
                    'name': 'master-node-oo-hs',
                    'flavor_id': self.flavor_id,
                    'node_processes': ['oozie', 'historyserver',
                                       'secondarynamenode'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'count': 1
                },
                {
                    'name': 'worker-node-nm-dn',
                    'node_group_template_id': self.ng_tmpl_nm_dn_id,
                    'count': 2
                },
                {
                    'name': 'worker-node-dn',
                    'node_group_template_id': self.ng_tmpl_dn_id,
                    'count': 1
                },
                {
                    'name': 'worker-node-nm',
                    'node_group_template_id': self.ng_tmpl_nm_id,
                    'count': 1
                }
            ],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s-v2' % (self.common_config.CLUSTER_NAME,
                                     self.vanilla_two_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.vanilla_two_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        self.create_cluster(**cluster)
        self.cluster_info = self.get_cluster_info(self.vanilla_two_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.vanilla_two_config)

    @b.errormsg("Failure while Cinder testing: ")
    def _check_cinder(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing: ")
    def _check_mapreduce(self):
        self.map_reduce_testing(self.cluster_info)

    @b.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self._edp_test()

    def _edp_test(self):
        path = 'tests/integration/tests/resources/'

        # check pig
        pig_job = f.get_file_text(path + 'edp-job.pig')
        pig_lib = f.get_file_text(path + 'edp-lib.jar')
        self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                         job_data_list=[{'pig': pig_job}],
                         lib_data_list=[{'jar': pig_lib}],
                         swift_binaries=True,
                         hdfs_local_output=True)

        # check mapreduce
        mapreduce_jar = f.get_file_text(path + 'edp-mapreduce.jar')
        mapreduce_configs = {
            'configs': {
                'mapred.mapper.class': 'org.apache.oozie.example.SampleMapper',
                'mapred.reducer.class':
                'org.apache.oozie.example.SampleReducer'
            }
        }
        self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                         job_data_list=[],
                         lib_data_list=[{'jar': mapreduce_jar}],
                         configs=mapreduce_configs,
                         swift_binaries=True,
                         hdfs_local_output=True)

        # check mapreduce streaming
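        # /bin/cat and /usr/bin/wc act as an identity mapper and a
        # line/word-count reducer, so the streaming job needs no user jar at
        # all (empty job_data_list and lib_data_list).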
        mapreduce_streaming_configs = {
            'configs': {
                'edp.streaming.mapper': '/bin/cat',
                'edp.streaming.reducer': '/usr/bin/wc'
            }
        }
        self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                         job_data_list=[],
                         lib_data_list=[],
                         configs=mapreduce_streaming_configs)

        # check java
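        # QuasiMonteCarlo estimates pi; its two args are the number of map
        # tasks and the number of samples per map.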
        java_jar = f.get_file_text(
            path + 'hadoop-mapreduce-examples-2.3.0.jar')
        java_configs = {
            'configs': {
                'edp.java.main_class':
                'org.apache.hadoop.examples.QuasiMonteCarlo'
            },
            'args': ['10', '10']
        }
        self.edp_testing(utils_edp.JOB_TYPE_JAVA,
                         job_data_list=[],
                         lib_data_list=[{'jar': java_jar}],
                         configs=java_configs)

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
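        # Net effect: shrink worker-node-nm-dn from 2 to 1, drop the dn-only
        # and nm-only groups to zero, then add one fresh group of each type,
        # exercising both the scale-down and scale-up paths.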
        change_list = [
            {
                'operation': 'resize',
                'info': ['worker-node-nm-dn', 1]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-dn', 0]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-nm', 0]
            },
            {
                'operation': 'add',
                'info': [
                    'new-worker-node-nm', 1, '%s' % self.ng_tmpl_nm_id
                ]
            },
            {
                'operation': 'add',
                'info': [
                    'new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id
                ]
            }
        ]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.vanilla_two_config)

    @b.errormsg("Failure while Cinder testing after cluster scaling: ")
    def _check_cinder_after_scaling(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing after cluster scaling: ")
    def _check_mapreduce_after_scaling(self):
        self.map_reduce_testing(self.cluster_info)

    @b.errormsg(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        self._edp_test()

    @testcase.skipIf(
        cfg.ITConfig().vanilla_two_config.SKIP_ALL_TESTS_FOR_PLUGIN,
        "All tests for Vanilla plugin were skipped")
    @testcase.attr('vanilla2')
    def test_vanilla_two_plugin_gating(self):
        self._prepare_test()
        self._create_nm_dn_ng_template()
        self._create_nm_ng_template()
        self._create_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_cinder()
        self._check_mapreduce()
        self._check_swift()
        self._check_edp()

        if not self.vanilla_two_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_cinder_after_scaling()
            self._check_mapreduce_after_scaling()
            self._check_swift_after_scaling()
            self._check_edp_after_scaling()

    def tearDown(self):
        self.delete_objects(self.cluster_id, self.cluster_template_id,
                            self.ng_template_ids)
        super(VanillaTwoGatingTest, self).tearDown()
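
# The @testcase.attr tags ('hdp2', 'vanilla2', 'idh3', 'cdh', ...) mark each
# gating test so that a runner which understands testtools attributes can
# select a single plugin's suite; the exact invocation depends on the test
# runner in use.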
class IDH3GatingTest(swift.SwiftTest, scaling.ScalingTest):

    idh3_config = cfg.ITConfig().idh3_config
    SKIP_SWIFT_TEST = idh3_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = idh3_config.SKIP_SCALING_TEST

    def setUp(self):
        super(IDH3GatingTest, self).setUp()

        self.idh3_config = cfg.ITConfig().idh3_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = \
                self.get_floating_ip_pool_id_for_neutron_net()

        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []
        self.idh3_config.IMAGE_ID, self.idh3_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.idh3_config))

    @base.errormsg("Failure while 'tt-dn' node group template creation: ")
    def _create_tt_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-idh3-tt-dn',
            'plugin_config': self.idh3_config,
            'description': 'test node group template for Intel plugin',
            'volumes_per_node': 0,
            'volume_size': 0,
            'node_processes': ['nodemanager', 'datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_tt_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_tt_dn_id)

    @base.errormsg("Failure while 'tt' node group template creation: ")
    def _create_tt_ng_template(self):
        template = {
            'name': 'test-node-group-template-idh3-tt',
            'plugin_config': self.idh3_config,
            'description': 'test node group template for Intel plugin',
            'volumes_per_node': 0,
            'volume_size': 0,
            'node_processes': ['nodemanager'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_tt_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_tt_id)

    @base.errormsg("Failure while 'dn' node group template creation: ")
    def _create_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-idh3-dn',
            'plugin_config': self.idh3_config,
            'description': 'test node group template for Intel plugin',
            'volumes_per_node': 0,
            'volume_size': 0,
            'node_processes': ['datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_dn_id)

    @base.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-idh3',
            'plugin_config': self.idh3_config,
            'description': 'test cluster template for Intel plugin',
            'cluster_configs': {
                'general': {
                    'Enable Swift': True,
                    'IDH tarball URL': self.idh3_config.IDH_TARBALL_URL,
                    'IDH repository URL': self.idh3_config.IDH_REPO_URL,
                    'OS repository URL': self.idh3_config.OS_REPO_URL
                },
                'HDFS': {
                    'dfs.replication': 1
                }
            },
            'node_groups': [
                {
                    'name': 'manager-node',
                    'flavor_id': self.idh3_config.MANAGER_FLAVOR_ID,
                    'node_processes': ['manager'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'count': 1
                },
                {
                    'name': 'master-node-jt-nn-hm',
                    'flavor_id': self.flavor_id,
                    'node_processes': ['namenode', 'resourcemanager',
                                       'historyserver'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'count': 1
                },
                {
                    'name': 'worker-node-tt-dn',
                    'node_group_template_id': self.ng_tmpl_tt_dn_id,
                    'count': 2
                },
                {
                    'name': 'worker-node-dn',
                    'node_group_template_id': self.ng_tmpl_dn_id,
                    'count': 1
                },
                {
                    'name': 'worker-node-tt',
                    'node_group_template_id': self.ng_tmpl_tt_id,
                    'count': 1
                }
            ],
            'net_id': self.internal_neutron_net
        }
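        # The tarball and repository URLs above come from the IDH section of
        # the integration-test config and point the plugin at the Intel
        # distribution artifacts to install.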
        self.cluster_template_id = self.create_cluster_template(**template)

    @base.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s-v3' % (self.common_config.CLUSTER_NAME,
                                     self.idh3_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.idh3_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        self.create_cluster(**cluster)
        self.cluster_info = self.get_cluster_info(self.idh3_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.idh3_config)

    @base.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @base.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        change_list = [
            {
                'operation': 'resize',
                'info': ['worker-node-tt-dn', 4]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-dn', 0]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-tt', 0]
            },
            {
                'operation': 'add',
                'info': ['new-worker-node-tt', 1, '%s' % self.ng_tmpl_tt_id]
            },
            {
                'operation': 'add',
                'info': ['new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id]
            }
        ]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)

    @base.errormsg(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @testcase.skipIf(cfg.ITConfig().idh3_config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     "All tests for Intel plugin were skipped")
    @testcase.attr('idh3')
    def test_idh_plugin_gating(self):
        self._create_tt_dn_ng_template()
        self._create_tt_ng_template()
        self._create_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_swift()

        if not self.idh3_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_swift_after_scaling()

    def tearDown(self):
        self.delete_objects(self.cluster_id, self.cluster_template_id,
                            self.ng_template_ids)
        super(IDH3GatingTest, self).tearDown()
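# Standalone fragment: a get_plugin_config override returning the Spark
# section of the integration-test config.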
    def get_plugin_config(self):
        return cfg.ITConfig().spark_config
class CDHGatingTest(cluster_configs.ClusterConfigTest,
                    map_reduce.MapReduceTest, swift.SwiftTest,
                    scaling.ScalingTest, cinder.CinderVolumeTest, edp.EDPTest):

    cdh_config = cfg.ITConfig().cdh_config
    SKIP_MAP_REDUCE_TEST = cdh_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = cdh_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = cdh_config.SKIP_SCALING_TEST
    SKIP_CINDER_TEST = cdh_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = cdh_config.SKIP_EDP_TEST

    def setUp(self):
        super(CDHGatingTest, self).setUp()
        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []

    def _prepare_test(self):
        self.cdh_config = cfg.ITConfig().cdh_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = (
                self.get_floating_ip_pool_id_for_neutron_net())

        self.cdh_config.IMAGE_ID, self.cdh_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.cdh_config))

        self.volumes_per_node = 0
        self.volume_size = 0
        if not self.SKIP_CINDER_TEST:
            self.volumes_per_node = 2
            self.volume_size = 2

    @b.errormsg("Failure while 'nm-dn' node group template creation: ")
    def _create_nm_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-cdh-nm-dn',
            'plugin_config': self.cdh_config,
            'description': 'test node group template for CDH plugin',
            'node_processes': ['NODEMANAGER', 'DATANODE'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_dn_id)

    @b.errormsg("Failure while 'nm' node group template creation: ")
    def _create_nm_ng_template(self):
        template = {
            'name': 'test-node-group-template-cdh-nm',
            'plugin_config': self.cdh_config,
            'description': 'test node group template for CDH plugin',
            'volumes_per_node': self.volumes_per_node,
            'volume_size': self.volume_size,
            'node_processes': ['NODEMANAGER'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_nm_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_nm_id)

    @b.errormsg("Failure while 'dn' node group template creation: ")
    def _create_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-cdh-dn',
            'plugin_config': self.cdh_config,
            'description': 'test node group template for CDH plugin',
            'volumes_per_node': self.volumes_per_node,
            'volume_size': self.volume_size,
            'node_processes': ['DATANODE'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_dn_id)

    @b.errormsg("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        cl_config = {
            'general': {
                'CDH5 repo list URL': self.cdh_config.CDH_REPO_LIST_URL,
                'CM5 repo list URL': self.cdh_config.CM_REPO_LIST_URL,
                'CDH5 repo key URL (for debian-based only)':
                self.cdh_config.CDH_APT_KEY_URL,
                'CM5 repo key URL (for debian-based only)':
                self.cdh_config.CM_APT_KEY_URL,
                'Enable Swift': True
            }
        }
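        # The general configs above point the CDH plugin at explicit CDH5 and
        # CM5 package repositories (plus, for debian-based images, the
        # matching repo signing keys) instead of the distribution defaults.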
        template = {
            'name': 'test-cluster-template-cdh',
            'plugin_config': self.cdh_config,
            'description': 'test cluster template for CDH plugin',
            'cluster_configs': cl_config,
            'node_groups': [
                {
                    'name': 'manager-node',
                    'flavor_id': self.cdh_config.MANAGERNODE_FLAVOR,
                    'node_processes': ['MANAGER'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'count': 1
                },
                {
                    'name': 'master-node-rm-nn',
                    'flavor_id': self.flavor_id,
                    'node_processes': ['NAMENODE', 'RESOURCEMANAGER'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'count': 1
                },
                {
                    'name': 'master-node-oo-hs-snn',
                    'flavor_id': self.flavor_id,
                    'node_processes': ['OOZIE_SERVER', 'JOBHISTORY',
                                       'SECONDARYNAMENODE'],
                    'floating_ip_pool': self.floating_ip_pool,
                    'count': 1
                },
                {
                    'name': 'worker-node-nm-dn',
                    'node_group_template_id': self.ng_tmpl_nm_dn_id,
                    'count': 2
                },
                {
                    'name': 'worker-node-dn',
                    'node_group_template_id': self.ng_tmpl_dn_id,
                    'count': 1
                },
                {
                    'name': 'worker-node-nm',
                    'node_group_template_id': self.ng_tmpl_nm_id,
                    'count': 1
                }
            ],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)

    @b.errormsg("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster_name = '%s-%s' % (self.common_config.CLUSTER_NAME,
                                  self.cdh_config.PLUGIN_NAME)
        cluster = {
            'name': cluster_name,
            'plugin_config': self.cdh_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {
                'HDFS': {
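                    # Note: the CDH plugin spells this setting
                    # 'dfs_replication', unlike the vanilla plugin's
                    # 'dfs.replication'.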
                    'dfs_replication': 1
                }
            }
        }
        self.create_cluster(**cluster)
        self.cluster_info = self.get_cluster_info(self.cdh_config)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.cdh_config)

    @b.errormsg("Failure while Cinder testing: ")
    def _check_cinder(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing: ")
    def _check_mapreduce(self):
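        # check_log=False skips verification of the job's log output;
        # presumably log retrieval works differently on CDH than the shared
        # MapReduceTest helper expects.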
        self.map_reduce_testing(self.cluster_info, check_log=False)

    @b.errormsg("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing: ")
    def _check_edp(self):
        self._edp_test()

    def _edp_test(self):
        # check pig
        pig_job = self.edp_info.read_pig_example_script()
        pig_lib = self.edp_info.read_pig_example_jar()
        self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                         job_data_list=[{'pig': pig_job}],
                         lib_data_list=[{'jar': pig_lib}],
                         swift_binaries=False,
                         hdfs_local_output=True)

        # check mapreduce
        mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
        mapreduce_configs = self.edp_info.mapreduce_example_configs()
        self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                         job_data_list=[],
                         lib_data_list=[{'jar': mapreduce_jar}],
                         configs=mapreduce_configs,
                         swift_binaries=False,
                         hdfs_local_output=True)

        # check mapreduce streaming
        self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                         job_data_list=[],
                         lib_data_list=[],
                         configs=self.edp_info.mapreduce_streaming_configs(),
                         swift_binaries=False,
                         hdfs_local_output=True)
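
        # Unlike the vanilla suite above, this EDP pass ends with the
        # streaming job and runs no Java example.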

    @b.errormsg("Failure while cluster scaling: ")
    def _check_scaling(self):
        change_list = [
            {
                'operation': 'resize',
                'info': ['worker-node-nm-dn', 1]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-dn', 0]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-nm', 0]
            },
            {
                'operation': 'add',
                'info': ['new-worker-node-nm', 1, '%s' % self.ng_tmpl_nm_id]
            },
            {
                'operation': 'add',
                'info': ['new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id]
            }
        ]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)
        self.await_active_workers_for_namenode(self.cluster_info['node_info'],
                                               self.cdh_config)

    @b.errormsg("Failure while Cinder testing after cluster scaling: ")
    def _check_cinder_after_scaling(self):
        self.cinder_volume_testing(self.cluster_info)

    @b.errormsg("Failure while Map Reduce testing after cluster scaling: ")
    def _check_mapreduce_after_scaling(self):
        self.map_reduce_testing(self.cluster_info, check_log=False)

    @b.errormsg(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        self.check_swift_availability(self.cluster_info)

    @b.errormsg("Failure while EDP testing after cluster scaling: ")
    def _check_edp_after_scaling(self):
        self._edp_test()

    @testcase.skipIf(cfg.ITConfig().cdh_config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     "All tests for CDH plugin were skipped")
    @testcase.attr('cdh')
    def test_cdh_plugin_gating(self):
        self._prepare_test()
        self._create_nm_dn_ng_template()
        self._create_nm_ng_template()
        self._create_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_cinder()
        self._check_mapreduce()
        self._check_swift()
        self._check_edp()

        if not self.cdh_config.SKIP_SCALING_TEST:
            self._check_scaling()
            self._check_cinder_after_scaling()
            self._check_mapreduce_after_scaling()
            self._check_swift_after_scaling()
            self._check_edp_after_scaling()

    def tearDown(self):
        self.delete_objects(self.cluster_id, self.cluster_template_id,
                            self.ng_template_ids)
        super(CDHGatingTest, self).tearDown()