Example #1
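A basic setUp for the integration tests: it loads the test configs, probes the Savanna REST endpoint over Telnet to confirm it is reachable, builds Savanna and Nova clients from the common config, and either generates a Nova keypair or reads an existing private key from disk.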
    def setUp(self):
        self.common_config = cfg.ITConfig().common_config
        self.vanilla_config = cfg.ITConfig().vanilla_config
        self.hdp_config = cfg.ITConfig().hdp_config

        telnetlib.Telnet(self.common_config.SAVANNA_HOST,
                         self.common_config.SAVANNA_PORT)

        self.savanna = savanna_client.Client(
            username=self.common_config.OS_USERNAME,
            api_key=self.common_config.OS_PASSWORD,
            project_name=self.common_config.OS_TENANT_NAME,
            auth_url=self.common_config.OS_AUTH_URL,
            savanna_url='http://%s:%s/%s' %
            (self.common_config.SAVANNA_HOST, self.common_config.SAVANNA_PORT,
             self.common_config.SAVANNA_API_VERSION))

        self.nova = nova_client.Client(self.common_config.OS_USERNAME,
                                       self.common_config.OS_PASSWORD,
                                       self.common_config.OS_TENANT_NAME,
                                       self.common_config.OS_AUTH_URL,
                                       service_type='compute')

        if not self.common_config.PATH_TO_SSH_KEY:
            self.private_key = self.nova.keypairs.create(
                self.common_config.USER_KEYPAIR_ID).private_key
        else:
            self.private_key = open(self.common_config.PATH_TO_SSH_KEY).read()
Example #2
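A later variant of the same setUp: the Savanna client now takes the API version as its first argument and a tenant-scoped URL, a Neutron client is added, and a temporary Nova flavor and keypair (suffixed with a short UUID) are created when none are configured.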
    def setUp(self):
        self.common_config = cfg.ITConfig().common_config
        self.vanilla_config = cfg.ITConfig().vanilla_config
        self.hdp_config = cfg.ITConfig().hdp_config
        self.idh_config = cfg.ITConfig().idh_config

        telnetlib.Telnet(
            self.common_config.SAVANNA_HOST, self.common_config.SAVANNA_PORT
        )

        self.savanna = savanna_client.Client(
            self.common_config.SAVANNA_API_VERSION,
            username=self.common_config.OS_USERNAME,
            api_key=self.common_config.OS_PASSWORD,
            project_name=self.common_config.OS_TENANT_NAME,
            auth_url=self.common_config.OS_AUTH_URL,
            savanna_url='http://%s:%s/v%s/%s' % (
                self.common_config.SAVANNA_HOST,
                self.common_config.SAVANNA_PORT,
                self.common_config.SAVANNA_API_VERSION,
                self.common_config.OS_TENANT_ID
            ))

        self.nova = nova_client.Client(
            username=self.common_config.OS_USERNAME,
            api_key=self.common_config.OS_PASSWORD,
            project_id=self.common_config.OS_TENANT_NAME,
            auth_url=self.common_config.OS_AUTH_URL)

        self.neutron = neutron_client.Client(
            username=self.common_config.OS_USERNAME,
            password=self.common_config.OS_PASSWORD,
            tenant_name=self.common_config.OS_TENANT_NAME,
            auth_url=self.common_config.OS_AUTH_URL)

        if not self.common_config.FLAVOR_ID:
            self.flavor_id = self.nova.flavors.create(
                name='i-test-flavor-%s' % str(uuid.uuid4())[:8],
                ram=1024,
                vcpus=1,
                disk=10,
                ephemeral=10).id
        else:
            self.flavor_id = self.common_config.FLAVOR_ID

        if not self.common_config.PATH_TO_SSH_KEY:
            self.common_config.USER_KEYPAIR_ID += str(uuid.uuid4())[:8]
            self.private_key = self.nova.keypairs.create(
                self.common_config.USER_KEYPAIR_ID).private_key
        else:
            self.private_key = open(self.common_config.PATH_TO_SSH_KEY).read()
Example #3
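setUp for the Intel (IDH) gating test: when Neutron is enabled it resolves the internal network ID and the floating IP pool, then looks up the image ID and SSH username for the plugin under test.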
    def setUp(self):
        super(IDHGatingTest, self).setUp()

        self.idh_config = cfg.ITConfig().idh_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = \
                self.get_floating_ip_pool_id_for_neutron_net()

        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []
        self.idh_config.IMAGE_ID, self.idh_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.idh_config))
Example #4
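A complete gating test for the HDP plugin. Each stage (template creation, cluster creation, Map Reduce, Swift, scaling) runs in its own try block; on failure, excutils.save_and_reraise_exception() re-raises the original error after the objects created so far are deleted and a stage-specific message is logged via print_error_log.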
class HDPGatingTest(map_reduce.MapReduceTest, swift.SwiftTest,
                    scaling.ScalingTest):

    SKIP_MAP_REDUCE_TEST = cfg.ITConfig().hdp_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = cfg.ITConfig().hdp_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = cfg.ITConfig().hdp_config.SKIP_SCALING_TEST

    @attrib.attr(tags='hdp')
    @unittest2.skipIf(cfg.ITConfig().hdp_config.SKIP_ALL_TESTS_FOR_PLUGIN,
                      'All tests for HDP plugin were skipped')
    def test_hdp_plugin_gating(self):
        node_group_template_id_list = []

        #-------------------------------CLUSTER CREATION-------------------------------

        #-----------------------"tt-dn" node group template creation-------------------

        try:
            node_group_template_tt_dn_id = self.create_node_group_template(
                name='tt-dn',
                plugin_config=self.hdp_config,
                description='test node group template',
                volumes_per_node=0,
                volume_size=0,
                node_processes=[
                    'TASKTRACKER', 'DATANODE', 'HDFS_CLIENT',
                    'MAPREDUCE_CLIENT'
                ],
                node_configs={})
            node_group_template_id_list.append(node_group_template_tt_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                message = 'Failure while \'tt-dn\' node group ' \
                          'template creation: '
                self.print_error_log(message, e)

#---------------------------Cluster template creation--------------------------

        try:
            cluster_template_id = self.create_cluster_template(
                name='test-cluster-template',
                plugin_config=self.hdp_config,
                description='test cluster template',
                cluster_configs={},
                node_groups=[
                    dict(name='master-node-jt-nn',
                         flavor_id=self.common_config.FLAVOR_ID,
                         node_processes=[
                             'JOBTRACKER', 'NAMENODE', 'SECONDARY_NAMENODE',
                             'GANGLIA_SERVER', 'NAGIOS_SERVER', 'AMBARI_SERVER'
                         ],
                         node_configs={},
                         count=1),
                    dict(name='worker-node-tt-dn',
                         node_group_template_id=node_group_template_tt_dn_id,
                         count=3)
                ])

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list)
                message = 'Failure while cluster template creation: '
                self.print_error_log(message, e)

#-------------------------------Cluster creation-------------------------------

        try:
            cluster_info = self.create_cluster_and_get_info(
                plugin_config=self.hdp_config,
                cluster_template_id=cluster_template_id,
                description='test cluster',
                cluster_configs={})

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(self.cluster_id, cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while cluster creation: '
                self.print_error_log(message, e)

#------------------------------MAP REDUCE TESTING------------------------------

        try:
            self._map_reduce_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while Map Reduce testing: '
                self.print_error_log(message, e)

#---------------------------CHECK SWIFT AVAILABILITY---------------------------

        try:
            self._check_swift_availability(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure during check of Swift availability: '
                self.print_error_log(message, e)

#--------------------------------CLUSTER SCALING-------------------------------

        change_list = [{
            'operation': 'resize',
            'info': ['worker-node-tt-dn', 4]
        }, {
            'operation': 'add',
            'info': ['new-worker-node-tt-dn', 1,
                     '%s' % node_group_template_tt_dn_id]
        }]

        try:
            new_cluster_info = self._cluster_scaling(cluster_info, change_list)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while cluster scaling: '
                self.print_error_log(message, e)

        if not self.hdp_config.SKIP_SCALING_TEST:

            #-----------------------MAP REDUCE TESTING AFTER SCALING-----------------------

            try:
                self._map_reduce_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure while Map Reduce testing after ' \
                              'cluster scaling: '
                    self.print_error_log(message, e)

#--------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------

            try:
                self._check_swift_availability(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure during check of Swift availability ' \
                              'after cluster scaling: '
                    self.print_error_log(message, e)

#----------------------------DELETE CREATED OBJECTS----------------------------

        self.delete_objects(cluster_info['cluster_id'], cluster_template_id,
                            node_group_template_id_list)
Example #5
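The IDH gating test restructures the same flow: each stage becomes a helper method wrapped in an errormessage decorator (a sketch of which follows the example), and cleanup moves into tearDown so it runs whether or not a stage fails.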
class IDHGatingTest(cluster_configs.ClusterConfigTest, edp.EDPTest,
                    map_reduce.MapReduceTest, swift.SwiftTest,
                    scaling.ScalingTest):

    idh_config = cfg.ITConfig().idh_config
    SKIP_MAP_REDUCE_TEST = idh_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = idh_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = idh_config.SKIP_SCALING_TEST

    def setUp(self):
        super(IDHGatingTest, self).setUp()

        self.idh_config = cfg.ITConfig().idh_config
        self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
        self.internal_neutron_net = None
        if self.common_config.NEUTRON_ENABLED:
            self.internal_neutron_net = self.get_internal_neutron_net_id()
            self.floating_ip_pool = \
                self.get_floating_ip_pool_id_for_neutron_net()

        self.cluster_id = None
        self.cluster_template_id = None
        self.ng_template_ids = []
        self.idh_config.IMAGE_ID, self.idh_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.idh_config))

    @errormessage("Failure while 'tt-dn' node group template creation: ")
    def _create_tt_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-idh-tt-dn',
            'plugin_config': self.idh_config,
            'description': 'test node group template for Intel plugin',
            'volumes_per_node': 0,
            'volume_size': 0,
            'node_processes': ['tasktracker', 'datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_tt_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_tt_dn_id)

    @errormessage("Failure while 'tt' node group template creation: ")
    def _create_tt_ng_template(self):
        template = {
            'name': 'test-node-group-template-idh-tt',
            'plugin_config': self.idh_config,
            'description': 'test node group template for Intel plugin',
            'volumes_per_node': 0,
            'volume_size': 0,
            'node_processes': ['tasktracker'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_tt_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_tt_id)

    @errormessage("Failure while 'dn' node group template creation: ")
    def _create_dn_ng_template(self):
        template = {
            'name': 'test-node-group-template-idh-dn',
            'plugin_config': self.idh_config,
            'description': 'test node group template for Intel plugin',
            'volumes_per_node': 0,
            'volume_size': 0,
            'node_processes': ['datanode'],
            'floating_ip_pool': self.floating_ip_pool,
            'node_configs': {}
        }
        self.ng_tmpl_dn_id = self.create_node_group_template(**template)
        self.ng_template_ids.append(self.ng_tmpl_dn_id)

    @errormessage("Failure while cluster template creation: ")
    def _create_cluster_template(self):
        template = {
            'name': 'test-cluster-template-idh',
            'plugin_config': self.idh_config,
            'description': 'test cluster template for Intel plugin',
            'cluster_configs': {
                'general': {
                    'Enable Swift': True,
                    'IDH tarball URL': self.idh_config.IDH_TARBALL_URL,
                    'IDH repository URL': self.idh_config.IDH_REPO_URL,
                    'OS repository URL': self.idh_config.OS_REPO_URL
                },
                'HDFS': {
                    'dfs.replication': 1
                }
            },
            'node_groups': [{
                'name': 'manager-node',
                'flavor_id': self.idh_config.MANAGER_FLAVOR_ID,
                'node_processes': ['manager'],
                'floating_ip_pool': self.floating_ip_pool,
                'count': 1
            }, {
                'name': 'master-node-jt-nn',
                'flavor_id': self.flavor_id,
                'node_processes': ['namenode', 'jobtracker'],
                'floating_ip_pool': self.floating_ip_pool,
                'count': 1
            }, {
                'name': 'worker-node-tt-dn',
                'node_group_template_id': self.ng_tmpl_tt_dn_id,
                'count': 2
            }, {
                'name': 'worker-node-dn',
                'node_group_template_id': self.ng_tmpl_dn_id,
                'count': 1
            }, {
                'name': 'worker-node-tt',
                'node_group_template_id': self.ng_tmpl_tt_id,
                'count': 1
            }],
            'net_id': self.internal_neutron_net
        }
        self.cluster_template_id = self.create_cluster_template(**template)

    @errormessage("Failure while cluster creation: ")
    def _create_cluster(self):
        cluster = {
            'plugin_config': self.idh_config,
            'cluster_template_id': self.cluster_template_id,
            'description': 'test cluster',
            'cluster_configs': {}
        }
        self.cluster_info = self.create_cluster_and_get_info(**cluster)

    @errormessage("Failure while Map Reduce testing: ")
    def _check_mapreduce(self):
        self.map_reduce_testing(self.cluster_info)

    @errormessage("Failure during check of Swift availability: ")
    def _check_swift(self):
        self.check_swift_availability(self.cluster_info)

    @errormessage("Failure while cluster scaling: ")
    def _check_scaling(self):
        change_list = [{
            'operation': 'resize',
            'info': ['worker-node-tt-dn', 4]
        }, {
            'operation': 'resize',
            'info': ['worker-node-dn', 0]
        }, {
            'operation': 'resize',
            'info': ['worker-node-tt', 0]
        }, {
            'operation': 'add',
            'info': ['new-worker-node-tt', 1, '%s' % self.ng_tmpl_tt_id]
        }, {
            'operation': 'add',
            'info': ['new-worker-node-dn', 1, '%s' % self.ng_tmpl_dn_id]
        }]

        self.cluster_info = self.cluster_scaling(self.cluster_info,
                                                 change_list)

    @errormessage("Failure while Map Reduce testing after cluster scaling: ")
    def _check_mapreduce_after_scaling(self):
        if not self.idh_config.SKIP_SCALING_TEST:
            self.map_reduce_testing(self.cluster_info)

    @errormessage(
        "Failure during check of Swift availability after cluster scaling: ")
    def _check_swift_after_scaling(self):
        if not self.idh_config.SKIP_SCALING_TEST:
            self.check_swift_availability(self.cluster_info)

    @unittest2.skipIf(cfg.ITConfig().idh_config.SKIP_ALL_TESTS_FOR_PLUGIN,
                      "All tests for Intel plugin were skipped")
    @testcase.attr('idh')
    def test_idh_plugin_gating(self):
        self._create_tt_dn_ng_template()
        self._create_tt_ng_template()
        self._create_dn_ng_template()
        self._create_cluster_template()
        self._create_cluster()

        self._check_mapreduce()
        self._check_swift()
        self._check_scaling()
        self._check_mapreduce_after_scaling()
        self._check_swift_after_scaling()

    def tearDown(self):
        self.delete_objects(self.cluster_id, self.cluster_template_id,
                            self.ng_template_ids)
        super(IDHGatingTest, self).tearDown()
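
The errormessage decorator used above is not part of this excerpt. Below is a minimal sketch of what it might look like, assuming it simply wraps a test stage in the same save_and_reraise_exception/print_error_log pattern that the other examples write out inline; the import path and exact behavior are assumptions, not the project's confirmed implementation.

import functools

from savanna.openstack.common import excutils  # assumed import path


def errormessage(message):
    # Hypothetical reconstruction: on failure, log the stage-specific
    # message via the test's print_error_log, then re-raise the original
    # exception through save_and_reraise_exception().
    def decorator(fct):
        @functools.wraps(fct)
        def wrapper(self, *args, **kwargs):
            try:
                return fct(self, *args, **kwargs)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.print_error_log(message, e)
        return wrapper
    return decorator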
Example #6
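Two larger gating tests. HDPGatingTest adds Cinder volume and EDP (job execution) checks to the HDP flow, with Neutron-aware networking; VanillaGatingTest exercises cluster configs, EDP, Map Reduce, Swift, and scaling for the Vanilla plugin.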
class HDPGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
                    map_reduce.MapReduceTest, swift.SwiftTest,
                    scaling.ScalingTest):
    SKIP_CINDER_TEST = cfg.ITConfig().hdp_config.SKIP_CINDER_TEST
    SKIP_EDP_TEST = cfg.ITConfig().hdp_config.SKIP_EDP_TEST
    SKIP_MAP_REDUCE_TEST = cfg.ITConfig().hdp_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = cfg.ITConfig().hdp_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = cfg.ITConfig().hdp_config.SKIP_SCALING_TEST

    @unittest2.skipIf(cfg.ITConfig().hdp_config.SKIP_ALL_TESTS_FOR_PLUGIN,
                      'All tests for HDP plugin were skipped')
    @testcase.attr('hdp')
    def test_hdp_plugin_gating(self):
        self.hdp_config.IMAGE_ID, self.hdp_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.hdp_config))

        # Default value of self.common_config.FLOATING_IP_POOL is None
        floating_ip_pool = self.common_config.FLOATING_IP_POOL
        internal_neutron_net = None
        # If Neutron enabled then get ID of floating IP pool and ID of internal
        # Neutron network
        if self.common_config.NEUTRON_ENABLED:
            floating_ip_pool = self.get_floating_ip_pool_id_for_neutron_net()
            internal_neutron_net = self.get_internal_neutron_net_id()

        if not self.hdp_config.SKIP_CINDER_TEST:
            volumes_per_node = 2
            volume_size = 2
        else:
            volumes_per_node = 0
            volume_size = 0

        node_group_template_id_list = []

        #-------------------------------CLUSTER CREATION-------------------------------

        #-----------------------"tt-dn" node group template creation-------------------

        try:
            node_group_template_tt_dn_id = self.create_node_group_template(
                name='test-node-group-template-hdp-tt-dn',
                plugin_config=self.hdp_config,
                description='test node group template for HDP plugin',
                volumes_per_node=volumes_per_node,
                volume_size=volume_size,
                node_processes=[
                    'TASKTRACKER', 'DATANODE', 'HDFS_CLIENT',
                    'MAPREDUCE_CLIENT', 'OOZIE_CLIENT', 'PIG'
                ],
                node_configs={},
                floating_ip_pool=floating_ip_pool)
            node_group_template_id_list.append(node_group_template_tt_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                message = 'Failure while \'tt-dn\' node group ' \
                          'template creation: '
                self.print_error_log(message, e)

#---------------------------Cluster template creation--------------------------

        try:
            cluster_template_id = self.create_cluster_template(
                name='test-cluster-template-hdp',
                plugin_config=self.hdp_config,
                description='test cluster template for HDP plugin',
                cluster_configs={},
                node_groups=[
                    dict(name='master-node-jt-nn',
                         flavor_id=self.flavor_id,
                         node_processes=[
                             'JOBTRACKER', 'NAMENODE', 'SECONDARY_NAMENODE',
                             'GANGLIA_SERVER', 'NAGIOS_SERVER',
                             'AMBARI_SERVER', 'OOZIE_SERVER'
                         ],
                         node_configs={},
                         floating_ip_pool=floating_ip_pool,
                         count=1),
                    dict(name='worker-node-tt-dn',
                         node_group_template_id=node_group_template_tt_dn_id,
                         count=3)
                ],
                net_id=internal_neutron_net)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list)
                message = 'Failure while cluster template creation: '
                self.print_error_log(message, e)

#-------------------------------Cluster creation-------------------------------

        try:
            cluster_info = self.create_cluster_and_get_info(
                plugin_config=self.hdp_config,
                cluster_template_id=cluster_template_id,
                description='test cluster',
                cluster_configs={})

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(self.cluster_id, cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while cluster creation: '
                self.print_error_log(message, e)

#---------------------------------CINDER TESTING-------------------------------

        try:
            self.cinder_volume_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while Cinder testing: '
                self.print_error_log(message, e)

#----------------------------------EDP TESTING---------------------------------

        path = 'savanna/tests/integration/tests/resources/'
        pig_job_data = open(path + 'edp-job.pig').read()
        pig_lib_data = open(path + 'edp-lib.jar').read()
        mapreduce_jar_data = open(path + 'edp-mapreduce.jar').read()

        # This is a modified version of WordCount that takes swift configs
        java_lib_data = open(path + 'edp-java.jar').read()
        java_configs = {
            "configs": {
                "edp.java.main_class": "org.apache.hadoop.examples.WordCount"
            }
        }

        mapreduce_configs = {
            "configs": {
                "mapred.mapper.class": "org.apache.oozie.example.SampleMapper",
                "mapred.reducer.class":
                "org.apache.oozie.example.SampleReducer"
            }
        }
        mapreduce_streaming_configs = {
            "configs": {
                "edp.streaming.mapper": "/bin/cat",
                "edp.streaming.reducer": "/usr/bin/wc"
            }
        }
        try:
            self.edp_testing('Pig', [{'pig': pig_job_data}],
                             [{'jar': pig_lib_data}])
            self.edp_testing('MapReduce', [], [{'jar': mapreduce_jar_data}],
                             mapreduce_configs)
            self.edp_testing('MapReduce.Streaming', [], [],
                             mapreduce_streaming_configs)
            self.edp_testing('Java', [],
                             lib_data_list=[{'jar': java_lib_data}],
                             configs=java_configs,
                             pass_input_output_args=True)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while EDP testing: '
                self.print_error_log(message, e)

#------------------------------MAP REDUCE TESTING------------------------------

        try:
            self.map_reduce_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while Map Reduce testing: '
                self.print_error_log(message, e)

#---------------------------CHECK SWIFT AVAILABILITY---------------------------

        try:
            self.check_swift_availability(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure during check of Swift availability: '
                self.print_error_log(message, e)

#--------------------------------CLUSTER SCALING-------------------------------

        datanode_count_after_resizing = (
            cluster_info['node_info']['datanode_count'] +
            self.hdp_config.SCALE_EXISTING_NG_COUNT)
        change_list = [{
            'operation': 'resize',
            'info': ['worker-node-tt-dn', datanode_count_after_resizing]
        }, {
            'operation': 'add',
            'info': [
                'new-worker-node-tt-dn', self.hdp_config.SCALE_NEW_NG_COUNT,
                '%s' % node_group_template_tt_dn_id
            ]
        }]
        try:
            new_cluster_info = self.cluster_scaling(cluster_info, change_list)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(cluster_info['cluster_id'],
                                    cluster_template_id,
                                    node_group_template_id_list)
                message = 'Failure while cluster scaling: '
                self.print_error_log(message, e)

        if not self.hdp_config.SKIP_SCALING_TEST:

            #--------------------------CINDER TESTING AFTER SCALING------------------------

            try:
                self.cinder_volume_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure while Cinder testing after cluster ' \
                              'scaling: '
                    self.print_error_log(message, e)

#-----------------------MAP REDUCE TESTING AFTER SCALING-----------------------

            try:
                self.map_reduce_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure while Map Reduce testing after ' \
                              'cluster scaling: '
                    self.print_error_log(message, e)

#--------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------

            try:
                self.check_swift_availability(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(new_cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure during check of Swift availability ' \
                              'after cluster scaling: '
                    self.print_error_log(message, e)

#----------------------------DELETE CREATED OBJECTS----------------------------

        self.delete_objects(cluster_info['cluster_id'], cluster_template_id,
                            node_group_template_id_list)


class VanillaGatingTest(cluster_configs.ClusterConfigTest,
                        map_reduce.MapReduceTest, swift.SwiftTest,
                        scaling.ScalingTest, edp.EDPTest):

    SKIP_CLUSTER_CONFIG_TEST = \
        cfg.ITConfig().vanilla_config.SKIP_CLUSTER_CONFIG_TEST
    SKIP_EDP_TEST = cfg.ITConfig().vanilla_config.SKIP_EDP_TEST
    SKIP_MAP_REDUCE_TEST = cfg.ITConfig().vanilla_config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = cfg.ITConfig().vanilla_config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = cfg.ITConfig().vanilla_config.SKIP_SCALING_TEST

    @attrib.attr(tags='vanilla')
    @unittest2.skipIf(cfg.ITConfig().vanilla_config.SKIP_ALL_TESTS_FOR_PLUGIN,
                      'All tests for Vanilla plugin were skipped')
    def test_vanilla_plugin_gating(self):
        node_group_template_id_list = []

#-------------------------------CLUSTER CREATION-------------------------------

#---------------------"tt-dn" node group template creation---------------------

        try:
            node_group_template_tt_dn_id = self.create_node_group_template(
                name='tt-dn',
                plugin_config=self.vanilla_config,
                description='test node group template',
                volumes_per_node=0,
                volume_size=0,
                node_processes=['tasktracker', 'datanode'],
                node_configs={
                    'HDFS': cluster_configs.DN_CONFIG,
                    'MapReduce': cluster_configs.TT_CONFIG
                }
            )
            node_group_template_id_list.append(node_group_template_tt_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                message = 'Failure while \'tt-dn\' node group ' \
                          'template creation: '
                self.print_error_log(message, e)

#-----------------------"tt" node group template creation----------------------

        try:
            node_group_template_tt_id = self.create_node_group_template(
                name='tt',
                plugin_config=self.vanilla_config,
                description='test node group template',
                volumes_per_node=0,
                volume_size=0,
                node_processes=['tasktracker'],
                node_configs={
                    'MapReduce': cluster_configs.TT_CONFIG
                }
            )
            node_group_template_id_list.append(node_group_template_tt_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while \'tt\' node group template creation: '
                self.print_error_log(message, e)

#----------------------"dn" node group template creation-----------------------

        try:
            node_group_template_dn_id = self.create_node_group_template(
                name='dn',
                plugin_config=self.vanilla_config,
                description='test node group template',
                volumes_per_node=0,
                volume_size=0,
                node_processes=['datanode'],
                node_configs={
                    'HDFS': cluster_configs.DN_CONFIG
                }
            )
            node_group_template_id_list.append(node_group_template_dn_id)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while \'dn\' node group template creation: '
                self.print_error_log(message, e)

#---------------------------Cluster template creation--------------------------

        try:
            cluster_template_id = self.create_cluster_template(
                name='test-cluster-template',
                plugin_config=self.vanilla_config,
                description='test cluster template',
                cluster_configs={
                    'HDFS': cluster_configs.CLUSTER_HDFS_CONFIG,
                    'MapReduce': cluster_configs.CLUSTER_MR_CONFIG,
                    'general': {'Enable Swift': True}
                },
                node_groups=[
                    dict(
                        name='master-node-jt-nn',
                        flavor_id=self.common_config.FLAVOR_ID,
                        node_processes=['namenode', 'jobtracker'],
                        node_configs={
                            'HDFS': cluster_configs.NN_CONFIG,
                            'MapReduce': cluster_configs.JT_CONFIG
                        },
                        count=1),
                    dict(
                        name='master-node-sec-nn',
                        flavor_id=self.common_config.FLAVOR_ID,
                        node_processes=['secondarynamenode', 'oozie'],
                        node_configs={
                            'JobFlow': cluster_configs.OOZIE_CONFIG
                        },
                        count=1),
                    dict(
                        name='worker-node-tt-dn',
                        node_group_template_id=node_group_template_tt_dn_id,
                        count=3),
                    dict(
                        name='worker-node-dn',
                        node_group_template_id=node_group_template_dn_id,
                        count=1),
                    dict(
                        name='worker-node-tt',
                        node_group_template_id=node_group_template_tt_id,
                        count=1)
                ]
            )

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while cluster template creation: '
                self.print_error_log(message, e)

#-------------------------------Cluster creation-------------------------------

        try:
            cluster_info = self.create_cluster_and_get_info(
                plugin_config=self.vanilla_config,
                cluster_template_id=cluster_template_id,
                description='test cluster',
                cluster_configs={}
            )

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    self.cluster_id, cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while cluster creation: '
                self.print_error_log(message, e)

#----------------------------CLUSTER CONFIG TESTING----------------------------

        try:
            self._cluster_config_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while cluster config testing: '
                self.print_error_log(message, e)

#----------------------------------EDP TESTING---------------------------------

        job_data = open('integration/tests/resources/edp-job.pig').read()
        lib_data = open('integration/tests/resources/edp-lib.jar').read()
        job_jar_data = open('integration/tests/resources/edp-job.jar').read()

        configs = {
            "configs": {
                "mapred.mapper.class": "org.apache.oozie.example.SampleMapper",
                "mapred.reducer.class":
                "org.apache.oozie.example.SampleReducer"
            }
        }

        try:
            self._edp_testing('Pig', [{'pig': job_data}], [{'jar': lib_data}])

            # TODO(vrovachev): remove mains when bug #1237434 is fixed
            self._edp_testing('Jar', [{'pig': job_data}],
                              [{'jar': job_jar_data}], configs)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while EDP testing: '
                self.print_error_log(message, e)

#------------------------------MAP REDUCE TESTING------------------------------

        try:
            self._map_reduce_testing(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while Map Reduce testing: '
                self.print_error_log(message, e)

#---------------------------CHECK SWIFT AVAILABILITY---------------------------

        try:
            self._check_swift_availability(cluster_info)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure during check of Swift availability: '
                self.print_error_log(message, e)

#--------------------------------CLUSTER SCALING-------------------------------

        change_list = [
            {
                'operation': 'resize',
                'info': ['worker-node-tt-dn', 4]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-dn', 0]
            },
            {
                'operation': 'resize',
                'info': ['worker-node-tt', 0]
            },
            {
                'operation': 'add',
                'info': [
                    'new-worker-node-tt', 1, '%s' % node_group_template_tt_id
                ]
            },
            {
                'operation': 'add',
                'info': [
                    'new-worker-node-dn', 1, '%s' % node_group_template_dn_id
                ]
            }
        ]

        try:
            new_cluster_info = self._cluster_scaling(cluster_info, change_list)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while cluster scaling: '
                self.print_error_log(message, e)

        if not self.vanilla_config.SKIP_SCALING_TEST:

#---------------------CLUSTER CONFIG TESTING AFTER SCALING---------------------

            try:
                self._cluster_config_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = 'Failure while cluster config testing after ' \
                              'cluster scaling: '
                    self.print_error_log(message, e)

#-----------------------MAP REDUCE TESTING AFTER SCALING-----------------------

            try:
                self._map_reduce_testing(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = 'Failure while Map Reduce testing after ' \
                              'cluster scaling: '
                    self.print_error_log(message, e)

#--------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------

            try:
                self._check_swift_availability(new_cluster_info)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = 'Failure during check of Swift availability ' \
                              'after cluster scaling: '
                    self.print_error_log(message, e)

#----------------------------DELETE CREATED OBJECTS----------------------------

        self.delete_objects(
            cluster_info['cluster_id'], cluster_template_id,
            node_group_template_id_list
        )