Example 1
    def test_cleanup_configs(self):
        remote = mock.Mock()
        instance = mock.Mock()

        extra_conf = {'job_cleanup': {
            'valid': True,
            'script': 'script_text',
            'cron': 'cron_text'}}
        instance.node_group.node_processes = ["master"]
        instance.node_group.id = 'ng_id'  # a concrete id, not the id() builtin
        cluster_dict = self._init_cluster_dict('2.2')

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        plugin._push_cleanup_job(remote, cluster, extra_conf, instance)
        remote.write_file_to.assert_called_with(
            '/etc/hadoop/tmp-cleanup.sh',
            'script_text')
        remote.execute_command.assert_called_with(
            'sudo sh -c \'echo "cron_text" > /etc/cron.d/spark-cleanup\'')

        remote.reset_mock()
        instance.node_group.node_processes = ["worker"]
        plugin._push_cleanup_job(remote, cluster, extra_conf, instance)
        # method_calls covers write_file_to/execute_command; plain
        # remote.called would only track calls to remote itself.
        self.assertFalse(remote.method_calls)

        remote.reset_mock()
        instance.node_group.node_processes = ["master"]
        extra_conf['job_cleanup']['valid'] = False
        plugin._push_cleanup_job(remote, cluster, extra_conf, instance)
        remote.execute_command.assert_called_with(
            'sudo rm -f /etc/crond.d/spark-cleanup')
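For context, a minimal sketch of the plugin-side logic these assertions imply; it is reconstructed from the test, not taken from the Spark plugin's source, and the real _push_cleanup_job differs in detail.
    # Sketch (assumption): the cleanup script and cron entry are pushed only
    # to master nodes, and the cron entry is removed when cleanup is disabled.
    def _push_cleanup_job(self, remote, cluster, extra_conf, instance):
        job_conf = extra_conf['job_cleanup']
        if 'master' not in instance.node_group.node_processes:
            return  # workers get no cleanup machinery at all
        if job_conf['valid']:
            remote.write_file_to('/etc/hadoop/tmp-cleanup.sh',
                                 job_conf['script'])
            remote.execute_command(
                'sudo sh -c \'echo "%s" > /etc/cron.d/spark-cleanup\''
                % job_conf['cron'])
        else:
            # the test expects 'crond.d' here, mirroring the plugin's path
            remote.execute_command('sudo rm -f /etc/crond.d/spark-cleanup')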
Example 2
    # The mock_utils argument implies a patch decorator on this method; the
    # exact target below is an assumption.
    @mock.patch("sahara.plugins.storm.plugin.utils")
    def test_validate(self, mock_utils):

        cluster_data = self._get_cluster('cluster', '1.1.0')
        cluster = conductor.cluster_create(context.ctx(), cluster_data)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)

        # number of nimbus nodes != 1 should raise an exception
        fake_ng = mock.Mock()
        fake_ng.count = 0
        mock_ng = mock.Mock(return_value=[fake_ng])
        mock_utils.get_node_groups = mock_ng

        self.assertRaises(ex.RequiredServiceMissingException, plugin.validate,
                          cluster)

        mock_ng.assert_called_once_with(cluster, "nimbus")

        fake_ng.count = 2
        self.assertRaises(ex.InvalidComponentCountException, plugin.validate,
                          cluster)

        mock_ng.assert_called_with(cluster, "nimbus")
        self.assertEqual(2, mock_ng.call_count)

        # no supervisor should raise an exception
        fake_nimbus = mock.Mock()
        fake_nimbus.count = 1

        fake_supervisor = mock.Mock()
        fake_supervisor.count = 0

        mock_ng = mock.Mock(side_effect=[[fake_nimbus], [fake_supervisor]])
        mock_utils.get_node_groups = mock_ng

        self.assertRaises(ex.InvalidComponentCountException, plugin.validate,
                          cluster)

        mock_ng.assert_any_call(cluster, "nimbus")
        mock_ng.assert_any_call(cluster, "supervisor")
        self.assertEqual(2, mock_ng.call_count)

        # one nimbus and one or more supervisors should not raise an exception
        fake_nimbus.count = 1
        fake_supervisor.count = 2

        mock_ng = mock.Mock(side_effect=[[fake_nimbus], [fake_supervisor]])
        mock_utils.get_node_groups = mock_ng

        plugin.validate(cluster)

        mock_ng.assert_any_call(cluster, "nimbus")
        mock_ng.assert_any_call(cluster, "supervisor")
        self.assertEqual(2, mock_ng.call_count)
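The mocked counts above trace a validate() with roughly this shape; a sketch assuming the utils.get_node_groups helper patched in via mock_utils, not the Storm plugin's actual code.
    # Assumed control flow: exactly one nimbus, then at least one supervisor.
    def validate(self, cluster):
        nimbus_count = sum(ng.count for ng in
                           utils.get_node_groups(cluster, "nimbus"))
        if nimbus_count == 0:
            raise ex.RequiredServiceMissingException("nimbus")
        if nimbus_count != 1:
            raise ex.InvalidComponentCountException("nimbus", 1, nimbus_count)
        supervisor_count = sum(ng.count for ng in
                               utils.get_node_groups(cluster, "supervisor"))
        if supervisor_count < 1:
            raise ex.InvalidComponentCountException("supervisor", "1 or more",
                                                    supervisor_count)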
Example 3
    def test_plugin_edp_engine(self):
        cluster_dict = {
            'name': 'cluster',
            'plugin_name': 'cdh',
            'hadoop_version': '5.7.0',
            'default_image_id': 'image'}

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        self.assertIsInstance(
            plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK),
            edp.PluginsSparkJobEngine)
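The dispatch being asserted could look like the following; a hypothetical sketch, since the real CDH plugin resolves engines through its versioned handlers.
    # Hypothetical dispatch consistent with the assertion above.
    def get_edp_engine(self, cluster, job_type):
        if job_type == edp.JOB_TYPE_SPARK:
            return edp.PluginsSparkJobEngine(cluster)
        # other job types would map to an Oozie-based engine (assumption)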
Example 4
    # The create_dir argument implies a patch decorator; the target below is
    # an assumption based on the helper the vanilla EDP engine delegates to.
    @mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop2')
    def test_edp_calls_hadoop2_create_dir(self, create_dir):
        for version in ['2.7.1']:
            cluster_dict = {
                'name': 'cluster' + version.replace('.', '_'),
                'plugin_name': 'vanilla',
                'hadoop_version': version,
                'default_image_id': 'image'}

            cluster = conductor.cluster_create(context.ctx(), cluster_dict)
            plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
            create_dir.reset_mock()
            plugin.get_edp_engine(cluster, edp.JOB_TYPE_PIG).create_hdfs_dir(
                mock.Mock(), '/tmp')
            self.assertEqual(1, create_dir.call_count)
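The patched call chain presumably looks like this: create_hdfs_dir on the engine delegates once to the hadoop2 helper. Both the helper module and the hdfs-user accessor below are assumptions, not the vanilla plugin's verified API.
    # Assumed delegation that makes create_dir.call_count == 1 above.
    def create_hdfs_dir(self, remote, dir_name):
        hdfs_helper.create_dir_hadoop2(remote, dir_name,
                                       self.get_hdfs_user())  # names assumed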
Example 5
    def test_validate_existing_ng_scaling_raises(self):
        data = [{
            'name': 'master',
            'flavor_id': '42',
            'count': 1,
            'node_processes': ['nimbus']
        }, {
            'name': 'slave',
            'flavor_id': '42',
            'count': 1,
            'node_processes': ['supervisor']
        }, {
            'name': 'zookeeper',
            'flavor_id': '42',
            'count': 1,
            'node_processes': ['zookeeper']
        }, {
            'name': 'master2',
            'flavor_id': '42',
            'count': 0,
            'node_processes': ['nimbus']
        }]

        cluster_data_101 = self._get_cluster('cluster_1.0.1', '1.0.1')
        cluster_data_110 = self._get_cluster('cluster_1.1.0', '1.1.0')
        cluster_data_120 = self._get_cluster('cluster_1.2.0', '1.2')
        cluster_data_101['node_groups'] = data
        cluster_data_110['node_groups'] = data
        cluster_data_120['node_groups'] = data

        clusters = [cluster_data_101, cluster_data_110, cluster_data_120]

        for cluster_data in clusters:
            cluster = conductor.cluster_create(context.ctx(), cluster_data)
            plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
            master_id = [
                node.id for node in cluster.node_groups
                if node.name == 'master2'
            ]
            self.assertRaises(ex.NodeGroupCannotBeScaled,
                              plugin._validate_existing_ng_scaling, cluster,
                              master_id)
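A sketch of the scaling check this loop drives, assuming the Storm plugin only lets a node group be resized when every process it runs is scalable; _get_scalable_processes is an assumed helper.
    # Assumed shape: master2 runs 'nimbus', which is not scalable, so the
    # exception fires for each cluster version above.
    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable = self._get_scalable_processes()  # e.g. {'supervisor'}
        for ng in cluster.node_groups:
            if ng.id in existing and not set(ng.node_processes) <= scalable:
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, "node processes %s cannot be scaled"
                             % ', '.join(ng.node_processes))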
Example 6
    def test_plugin_edp_engine_no_spark(self):
        cluster_dict = {
            'name': 'cluster',
            'plugin_name': 'cdh',
            'hadoop_version': '5.7.0',
            'default_image_id': 'image'}

        job = mock.Mock()
        job.type = edp.JOB_TYPE_SPARK

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        edp_engine = plugin.get_edp_engine(cluster, edp.JOB_TYPE_SPARK)
        with testtools.ExpectedException(
                ex.InvalidComponentCountException,
                value_re="Hadoop cluster should contain 1 "
                         "SPARK_YARN_HISTORY_SERVER component\(s\). Actual "
                         "SPARK_YARN_HISTORY_SERVER count is 0\nError ID: .*"):

            edp_engine.validate_job_execution(cluster, job, mock.Mock())
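The matched message suggests a count check along these lines; a sketch assuming the engine counts SPARK_YARN_HISTORY_SERVER instances via a hypothetical _count_components helper.
    # Assumed validation producing the message matched above.
    def validate_job_execution(self, cluster, job, data):
        shs_count = self._count_components(cluster,
                                           'SPARK_YARN_HISTORY_SERVER')
        if shs_count != 1:
            raise ex.InvalidComponentCountException(
                'SPARK_YARN_HISTORY_SERVER', '1', shs_count)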
Example 7
    def _test_engine(self, version, job_type, eng):
        cluster_dict = self._get_cluster('demo', version)

        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
        self.assertIsInstance(plugin.get_edp_engine(cluster, job_type), eng)
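A hypothetical caller for the helper above, reusing names that appear in the earlier examples; the version/engine pairing is illustrative only.
    # Illustrative only: the version string and engine class are assumptions.
    def test_spark_engine(self):
        self._test_engine('2.2', edp.JOB_TYPE_SPARK,
                          edp.PluginsSparkJobEngine)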