Example #1
    def test_validate(self):
        plugin = p.IDHProvider()

        ng_mng = tu.make_ng_dict('mng', 'f1', ['manager'], 1)
        ng_nn = tu.make_ng_dict('nn', 'f1', ['namenode'], 1)
        ng_jt = tu.make_ng_dict('jt', 'f1', ['jobtracker'], 1)
        ng_dn = tu.make_ng_dict('dn', 'f1', ['datanode'], 2)
        ng_tt = tu.make_ng_dict('tt', 'f1', ['tasktracker'], 2)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_nn] + [ng_dn])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1', [ng_mng])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] * 2)
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_tt])
        self.assertRaises(g_ex.RequiredServiceMissingException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_jt] * 2 + [ng_tt])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)
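Every example below builds its fixtures through `tu.make_ng_dict` and `tu.create_cluster` from sahara's unit-test utilities. The real helpers wrap these dicts in Resource objects with attribute access; the sketch below is only a plausible reconstruction of the shape the tests rely on, and any field name beyond the positional arguments is an assumption:

import uuid


def make_ng_dict(name, flavor, processes, count, instances=None, **kwargs):
    # A node-group template: name, flavor id, list of node processes,
    # and the number of instances to provision.
    ng = {'id': str(uuid.uuid4()), 'name': name, 'flavor_id': flavor,
          'node_processes': processes, 'count': count,
          'instances': instances or []}
    ng.update(kwargs)
    return ng


def create_cluster(name, tenant, plugin, version, node_groups, **kwargs):
    # A cluster record over the node groups; extra keyword arguments such
    # as status, id, or anti_affinity become fields on the record. The real
    # helper returns a Resource so tests can use attribute access.
    cluster = {'name': name, 'tenant_id': tenant, 'plugin_name': plugin,
               'hadoop_version': version, 'node_groups': node_groups}
    cluster.update(kwargs)
    return cluster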
Example #2
    def test_validate(self):
        plugin = p.IDHProvider()

        ng_mng = tu.make_ng_dict('mng', 'f1', ['manager'], 1)
        ng_nn = tu.make_ng_dict('nn', 'f1', ['namenode'], 1)
        ng_rm = tu.make_ng_dict('rm', 'f1', ['resourcemanager'], 1)
        ng_dn = tu.make_ng_dict('dn', 'f1', ['datanode'], 2)
        ng_nm = tu.make_ng_dict('nm', 'f1', ['nodemanager'], 2)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_nn] + [ng_dn])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2', [ng_mng])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_mng] + [ng_nn] * 2)
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_mng] + [ng_nn] + [ng_nm])
        self.assertRaises(g_ex.RequiredServiceMissingException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_mng] + [ng_nn] + [ng_rm] * 2 + [ng_nm])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)
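For context, the assertions in these two examples exercise simple count-based checks inside the plugin's validate(). The sketch below is not the IDH plugin's actual code; the exception signatures are assumptions based on how the tests reference them:

def validate(cluster):
    def count(process):
        # Total instances of a process across all node groups.
        return sum(ng.count for ng in cluster.node_groups
                   if process in ng.node_processes)

    # Exactly one manager and one namenode are required.
    for process in ('manager', 'namenode'):
        if count(process) != 1:
            raise g_ex.InvalidComponentCountException(
                process, 1, count(process))
    # At most one resourcemanager, and it must exist once workers appear.
    if count('resourcemanager') > 1:
        raise g_ex.InvalidComponentCountException(
            'resourcemanager', '0 or 1', count('resourcemanager'))
    if count('nodemanager') and not count('resourcemanager'):
        raise g_ex.RequiredServiceMissingException(
            'resourcemanager', required_by='nodemanager')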
Example #3
    def test_validate_scaling(self, vls, vla, get_by_id):
        self.vh.pctx['all_confs'] = [TestConfig('HDFS', 'dfs.replication', -1)]
        ng1 = testutils.make_ng_dict('ng1', '40', ['namenode'], 1)
        ng2 = testutils.make_ng_dict('ng2', '41', ['datanode'], 2)
        ng3 = testutils.make_ng_dict('ng3', '42', ['datanode'], 3)
        additional = [ng2['id'], ng3['id']]
        existing = {ng2['id']: 1}
        cluster = testutils.create_cluster('test-cluster', 'tenant1', 'fake',
                                           '0.1', [ng1, ng2, ng3])
        self.vh.validate_scaling(cluster, existing, additional)
        vla.assert_called_once_with(cluster, additional)
        vls.assert_called_once_with(self.vh.pctx, cluster, existing)

        ng4 = testutils.make_ng_dict('ng4', '43', ['datanode', 'zookeeper'], 3)
        ng5 = testutils.make_ng_dict('ng5', '44', ['datanode', 'zookeeper'], 1)
        existing = {ng4['id']: 2}
        additional = {ng5['id']}
        cluster = testutils.create_cluster('test-cluster', 'tenant1', 'fake',
                                           '0.1', [ng1, ng4])

        with testtools.ExpectedException(ex.ClusterCannotBeScaled):
            self.vh.validate_scaling(cluster, existing, {})

        get_by_id.return_value = r.NodeGroupResource(ng5)

        with testtools.ExpectedException(ex.ClusterCannotBeScaled):
            self.vh.validate_scaling(cluster, {}, additional)
Example #4
    def test_check_cluster_scaling_missing_resource(self, ops,
                                                    m_nova, m_image):
        ops.get_engine_type_and_version.return_value = "heat.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)

        nova = mock.Mock()
        m_nova.return_value = nova
        nova.keypairs.get.side_effect = u._get_keypair
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "fake", "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE,
            sahara_info={"infrastructure_engine": "heat.1.1"},
            id='12321', user_keypair_id='keypair')
        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_exception=ex.NotFoundException,
            expected_message="Requested keypair 'keypair' not found")

        image = mock.Mock()
        m_image.return_value = image
        image.list_registered.return_value = [mock.Mock(id='image1'),
                                              mock.Mock(id='image2')]
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "fake", "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE,
            sahara_info={"infrastructure_engine": "heat.1.1"},
            id='12321', default_image_id='image_id',
            user_keypair_id='test_keypair')
        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Requested image 'image_id' is not registered")
Example #5
    def test_validate(self):
        plugin = p.IDHProvider()

        ng_mng = tu.make_ng_dict('mng', 'f1', ['manager'], 1)
        ng_nn = tu.make_ng_dict('nn', 'f1', ['namenode'], 1)
        ng_jt = tu.make_ng_dict('jt', 'f1', ['jobtracker'], 1)
        ng_dn = tu.make_ng_dict('dn', 'f1', ['datanode'], 2)
        ng_tt = tu.make_ng_dict('tt', 'f1', ['tasktracker'], 2)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_nn] + [ng_dn])
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1', [ng_mng])
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] * 2)
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_tt])
        self.assertRaises(g_ex.RequiredServiceMissingException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_jt] * 2 + [ng_tt])
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)
Example #6
    def test_get_jobtracker(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager, self.ng_jobtracker])
        self.assertEqual('jt1', u.get_jobtracker(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertIsNone(u.get_jobtracker(cl))
Example #7
    def test_get_oozie(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                               [self.ng_manager, self.ng_oozie])
        self.assertEqual('ooz1', u.get_oozie(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                               [self.ng_manager])
        self.assertIsNone(u.get_oozie(cl))
Example #8
    def test_get_hiveserver(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager, self.ng_hiveserver])
        self.assertEqual('hs1', u.get_hiveserver(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager])
        self.assertIsNone(u.get_hiveserver(cl))
Example #9
    def test_get_namenode(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager, self.ng_namenode])
        self.assertEqual('nn1', u.get_namenode(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertIsNone(u.get_namenode(cl))
Example #10
    def test_get_hiveserver(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager, self.ng_hiveserver])
        self.assertEqual('hs1', u.get_hiveserver(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertIsNone(u.get_hiveserver(cl))
Example #11
    def test_get_oozie(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                               [self.ng_manager, self.ng_oozie])
        self.assertEqual('ooz1', u.get_oozie(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                               [self.ng_manager])
        self.assertIsNone(u.get_oozie(cl))
Example #12
    def test_get_namenode(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager, self.ng_namenode])
        self.assertEqual('nn1', u.get_namenode(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager])
        self.assertIsNone(u.get_namenode(cl))
Example #13
    def test_get_secondarynamenodes(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager, self.ng_namenode,
                                self.ng_secondarynamenode])
        self.assertEqual('snn1', u.get_secondarynamenode(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager])
        self.assertIsNone(u.get_secondarynamenode(cl))
Example #14
    def test_get_secondarynamenodes(self):
        cl = tu.create_cluster(
            'cl1', 't1', 'vanilla', '1.2.1',
            [self.ng_manager, self.ng_namenode, self.ng_secondarynamenode])
        self.assertEqual('snn1', u.get_secondarynamenodes(cl)[0].instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertEqual([], u.get_secondarynamenodes(cl))
Example #15
    def test_get_secondarynamenodes(self):
        cl = tu.create_cluster(
            'cl1', 't1', 'vanilla', '2.6.0',
            [self.ng_manager, self.ng_namenode, self.ng_secondarynamenode])
        self.assertEqual('snn1', u.get_secondarynamenode(cl).instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager])
        self.assertIsNone(u.get_secondarynamenode(cl))
Example #16
    def test_get_nodemanagers(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                               [self.ng_manager, self.ng_nodemanager])
        nodemanagers = u.get_nodemanagers(cl)
        self.assertEqual(2, len(nodemanagers))
        self.assertEqual(
            set(['tt1', 'tt2']),
            set([nodemanagers[0].instance_id, nodemanagers[1].instance_id]))

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                               [self.ng_namenode])
        self.assertEqual([], u.get_nodemanagers(cl))
Example #17
    def test_get_datanodes(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager, self.ng_namenode,
                                self.ng_datanode])
        datanodes = u.get_datanodes(cl)
        self.assertEqual(2, len(datanodes))
        self.assertEqual(set(['dn1', 'dn2']),
                         set([datanodes[0].instance_id,
                              datanodes[1].instance_id]))

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                               [self.ng_manager])
        self.assertEqual([], u.get_datanodes(cl))
Example #18
    def test_get_datanodes(self):
        cl = tu.create_cluster(
            'cl1', 't1', 'vanilla', '1.2.1',
            [self.ng_manager, self.ng_namenode, self.ng_datanode])
        datanodes = u.get_datanodes(cl)
        self.assertEqual(2, len(datanodes))
        self.assertEqual(
            set(['dn1', 'dn2']),
            set([datanodes[0].instance_id, datanodes[1].instance_id]))

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertEqual([], u.get_datanodes(cl))
Example #19
    def test_get_tasktrackers(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager, self.ng_jobtracker,
                                self.ng_tasktracker])
        tasktrackers = u.get_tasktrackers(cl)
        self.assertEqual(2, len(tasktrackers))
        self.assertEqual(set(['tt1', 'tt2']),
                         set([tasktrackers[0].instance_id,
                              tasktrackers[1].instance_id]))

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertEqual([], u.get_tasktrackers(cl))
Example #20
    def test_get_tasktrackers(self):
        cl = tu.create_cluster(
            'cl1', 't1', 'vanilla', '1.2.1',
            [self.ng_manager, self.ng_jobtracker, self.ng_tasktracker])
        tasktrackers = u.get_tasktrackers(cl)
        self.assertEqual(2, len(tasktrackers))
        self.assertEqual(
            set(['tt1', 'tt2']),
            set([tasktrackers[0].instance_id, tasktrackers[1].instance_id]))

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertEqual([], u.get_tasktrackers(cl))
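Examples #6 through #20 go through thin accessors on the plugin utils module imported as `u`. Their common shape, sketched here as an assumption rather than the exact upstream code, is a filter over node groups by process name:

def get_instances(cluster, node_process=None):
    # All instances, or only those whose node group runs node_process.
    instances = []
    for node_group in cluster.node_groups:
        if node_process is None or node_process in node_group.node_processes:
            instances.extend(node_group.instances)
    return instances


def get_instance(cluster, node_process):
    # Single-instance accessors (get_namenode, get_oozie, ...) resolve to
    # the one matching instance or None, which is what the tests assert on.
    instances = get_instances(cluster, node_process)
    return instances[0] if instances else None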
Example #21
    def test_check_cluster_scaling_resize_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "vanilla",
                                    "1.2.1", [ng1],
                                    status='Validating',
                                    id='12321')

        self._assert_check_scaling(data={},
                                   cluster=cluster,
                                   expected_message="Cluster cannot be scaled "
                                   "not in 'Active' "
                                   "status. Cluster status: "
                                   "Validating")

        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "vanilla",
                                    "1.2.1", [ng1],
                                    status='Active',
                                    id='12321')
        data = {
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }],
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message="Cluster doesn't contain "
                                   "node group with name 'a'")
        data.update({
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }, {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }]
        })
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message='Duplicates in node '
                                   'group names are detected',
                                   expected_exception=ex.InvalidDataException)
Example #22
    def test_check_cluster_scaling_resize_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "fake",
                                    "0.1", [ng1],
                                    status=c_u.CLUSTER_STATUS_VALIDATING,
                                    id='12321')

        self._assert_check_scaling(data={},
                                   cluster=cluster,
                                   expected_message="Cluster cannot be scaled "
                                   "not in '" + c_u.CLUSTER_STATUS_ACTIVE +
                                   "' status. Cluster status: " +
                                   c_u.CLUSTER_STATUS_VALIDATING)

        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "fake",
                                    "0.1", [ng1],
                                    status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')
        data = {
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }],
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message="Cluster doesn't contain "
                                   "node group with name 'a'")
        data.update({
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }, {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }]
        })
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message=self.duplicates_detected,
                                   expected_exception=ex.InvalidDataException)
Example #23
    def test_check_edp_job_support_spark(self, get_job, get_cluster):
        # utils.start_patch will construct a vanilla cluster as a
        # default for get_cluster, but we want a Spark cluster.
        # So, we'll make our own.

        # Note that this means we cannot use assert_create_object_validation()
        # because it calls start_patch() and will override our setting
        job = mock.Mock(type=edp.JOB_TYPE_SPARK, mains=["main"], interface=[])
        get_job.return_value = job
        ng = tu.make_ng_dict('master',
                             42, [],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "spark", "2.2", [ng])

        # Everything is okay; a Spark cluster supports EDP by default
        # because it requires a master and at least one slave
        wrap_it(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.class"
                    }
                }
            })
Example #24
    def _get_cluster(self):
        i1 = tu.make_inst_dict('id_1', 'instance_1', '1.1.1.1')
        master_proc = [
            yarn.RESOURCE_MANAGER.ui_name,
            yarn.NODE_MANAGER.ui_name,
            yarn.HISTORY_SERVER.ui_name,
            maprfs.CLDB.ui_name,
            maprfs.FILE_SERVER.ui_name,
            oozie.OOZIE.ui_name,
            management.ZOOKEEPER.ui_name,
        ]

        master_ng = tu.make_ng_dict('master', 'large', master_proc, 1, [i1])
        cluster_configs = {
            'Service': {
                'key': 'value',
                'Service Version': '1.1',
            },
        }

        cluster = tu.create_cluster(
            name='test_cluster',
            tenant='large',
            plugin='mapr',
            version='4.0.1.mrv1',
            node_groups=[master_ng],
            cluster_configs=cluster_configs,
        )
        self.ng = cluster.node_groups[0]
        self.instance = self.ng.instances[0]
        return cluster
Example #25
    def test_load_template_with_anti_affinity_single_ng(self):
        """This test checks Heat cluster template with Neutron enabled
           and anti-affinity feature enabled for single node process
           in single node group.
        """

        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=1)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 2,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=2)
        cluster = tu.create_cluster("cluster", "tenant1", "general",
                                    "1.2.1", [ng1, ng2],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1',
                                    anti_affinity=['datanode'], image_id=None)
        aa_heat_template = h.ClusterTemplate(cluster)
        aa_heat_template.add_node_group_extra(ng1['id'], 1,
                                              get_ud_generator('line1\nline2'))
        aa_heat_template.add_node_group_extra(ng2['id'], 2,
                                              get_ud_generator('line2\nline3'))

        self.override_config("use_neutron", True)
        main_template = h._load_template(
            'main.heat', {'resources':
                          aa_heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text(
                "tests/unit/resources/"
                "test_serialize_resources_aa.heat")))
Example #26
    def test_edp_main_class_spark(self, job_get, cluster_get):
        job_get.return_value = mock.Mock(type=edp.JOB_TYPE_SPARK, interface=[])
        ng = tu.make_ng_dict('master',
                             42, ['namenode'],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "spark", "1.0.0", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {},
                    "params": {},
                    "args": []
                }
            },
            bad_req_i=(1, "INVALID_DATA", "%s job must "
                       "specify edp.java.main_class" % edp.JOB_TYPE_SPARK))

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.myclass"
                    },
                    "params": {},
                    "args": []
                }
            })
Example #27
    def test_data_sources_differ(self, get_job, get_data_source, get_cluster):
        get_job.return_value = mock.Mock(type=edp.JOB_TYPE_MAPREDUCE_STREAMING,
                                         libs=[],
                                         interface=[])

        ds1_id = six.text_type(uuid.uuid4())
        ds2_id = six.text_type(uuid.uuid4())

        data_sources = {
            ds1_id: mock.Mock(type="swift", url="http://swift/test"),
            ds2_id: mock.Mock(type="swift", url="http://swift/test2"),
        }

        get_data_source.side_effect = lambda ctx, x: data_sources[x]

        ng = tu.make_ng_dict('master',
                             42, ['oozie'],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "vanilla", "2.6.0", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"
                    },
                    "params": {},
                    "args": []
                }
            })

        data_sources[ds2_id].url = "http://swift/test"

        err_msg = ("Provided input and output DataSources reference the "
                   "same location: %s" % data_sources[ds2_id].url)

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"
                    },
                    "params": {},
                    "args": []
                }
            },
            bad_req_i=(1, "INVALID_DATA", err_msg))
Example #28
    def test_load_template_use_neutron(self):
        """This test checks Heat cluster template with Neutron enabled.
           Two NodeGroups used: 'master' with Ephemeral drive attached and
           'worker' with 2 attached volumes 10GB size each
        """

        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=1)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=2, volumes_size=10, id=2)
        cluster = tu.create_cluster("cluster", "tenant1", "general",
                                    "1.2.1", [ng1, ng2],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1', anti_affinity=[],
                                    image_id=None)
        heat_template = h.ClusterTemplate(cluster)
        heat_template.add_node_group_extra(ng1['id'], 1,
                                           get_ud_generator('line1\nline2'))
        heat_template.add_node_group_extra(ng2['id'], 1,
                                           get_ud_generator('line2\nline3'))

        self.override_config("use_neutron", True)
        main_template = h._load_template(
            'main.heat', {'resources':
                          heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text(
                "tests/unit/resources/"
                "test_serialize_resources_use_neutron.heat")))
Example #29
    def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None):
        return tu.create_cluster("cluster", "tenant1", "general",
                                 "2.6.0", [ng1, ng2],
                                 user_keypair_id='user_key',
                                 neutron_management_network=mng_network,
                                 default_image_id='1', image_id=None,
                                 anti_affinity=anti_affinity or [])
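A test built on this helper passes two node-group dicts and, optionally, an anti-affinity list; an illustrative call:

cluster = self._make_cluster('private_net', ng1, ng2,
                             anti_affinity=['datanode'])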
Example #30
    def test_load_template_with_anti_affinity_single_ng(self):
        """This test checks Heat cluster template with Neutron enabled
           and anti-affinity feature enabled for single node process
           in single node group.
        """

        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=1)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 2,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=2)
        cluster = tu.create_cluster("cluster", "tenant1", "general",
                                    "1.2.1", [ng1, ng2],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1',
                                    anti_affinity=['datanode'], image_id=None)
        aa_heat_template = h.ClusterTemplate(cluster)
        aa_heat_template.add_node_group_extra(ng1['id'], 1,
                                              get_ud_generator('line1\nline2'))
        aa_heat_template.add_node_group_extra(ng2['id'], 2,
                                              get_ud_generator('line2\nline3'))

        self.override_config("use_neutron", True)
        main_template = h._load_template(
            'main.heat', {'resources':
                          aa_heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text(
                "tests/unit/resources/"
                "test_serialize_resources_aa.heat")))
Example #31
    def test_edp_main_class_spark(self, job_get, cluster_get):
        job_get.return_value = mock.Mock(type=edp.JOB_TYPE_SPARK,
                                         interface=[])
        ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
                             instances=[tu.make_inst_dict('id', 'name')])
        cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "spark", "1.0.0", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {"configs": {},
                                "params": {},
                                "args": []}
            },
            bad_req_i=(1, "INVALID_DATA",
                          "%s job must "
                          "specify edp.java.main_class" % edp.JOB_TYPE_SPARK))

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.myclass"},
                    "params": {},
                    "args": []}
            })
Example #32
    def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None):
        return tu.create_cluster("cluster", "tenant1", "general",
                                 "1.2.1", [ng1, ng2],
                                 user_keypair_id='user_key',
                                 neutron_management_network=mng_network,
                                 default_image_id='1', image_id=None,
                                 anti_affinity=anti_affinity or [])
Example #33
    def _get_context(self):
        i1 = tu.make_inst_dict('id_1', 'instance_1', MANAGEMENT_IP)
        i1['internal_ip'] = INTERNAL_IP
        master_proc = [
            yarn.RESOURCE_MANAGER.ui_name,
            yarn.NODE_MANAGER.ui_name,
            yarn.HISTORY_SERVER.ui_name,
            maprfs.CLDB.ui_name,
            maprfs.FILE_SERVER.ui_name,
            oozie.OOZIE.ui_name,
            management.ZOOKEEPER.ui_name,
        ]
        master_ng = tu.make_ng_dict('master', 'large', master_proc, 1, [i1])
        cluster_configs = {
            'Service': {
                'key': 'value',
                'Service Version': '1.1',
            },
            'Oozie': {
                'Oozie Version': '4.2.0',
            }
        }
        cluster = tu.create_cluster(
            name='test_cluster',
            tenant='large',
            plugin='mapr',
            version='5.2.0.mrv2',
            node_groups=[master_ng],
            cluster_configs=cluster_configs,
        )
        self.ng = cluster.node_groups[0]
        self.instance = self.ng.instances[0]
        return cc.Context(cluster, handler.VersionHandler())
Example #34
    def _get_context(self):
        i1 = tu.make_inst_dict('id_1', 'instance_1', '1.1.1.1')
        master_proc = [
            yarn.RESOURCE_MANAGER.ui_name,
            yarn.NODE_MANAGER.ui_name,
            yarn.HISTORY_SERVER.ui_name,
            maprfs.CLDB.ui_name,
            maprfs.FILE_SERVER.ui_name,
            oozie.OOZIE.ui_name,
            management.ZOOKEEPER.ui_name,
        ]
        master_ng = tu.make_ng_dict('master', 'large', master_proc, 1, [i1])
        cluster_configs = {
            'Service': {
                'key': 'value',
                'Service Version': '1.1',
            },
        }
        cluster = tu.create_cluster(
            name='test_cluster',
            tenant='large',
            plugin='mapr',
            version='4.0.1.mrv1',
            node_groups=[master_ng],
            cluster_configs=cluster_configs,
        )
        self.ng = cluster.node_groups[0]
        self.instance = self.ng.instances[0]
        return cc.Context(cluster, handler.VersionHandler())
Example #35
    def test_check_cluster_scaling_add_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("test-cluster",
                                    "tenant",
                                    "vanilla",
                                    "1.2.1", [ng1],
                                    status='Active',
                                    id='12321')
        data = {
            'add_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }, {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }]
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message='Duplicates in node '
                                   'group names are detected',
                                   expected_exception=ex.InvalidDataException)
        data = {
            'add_node_groups': [
                {
                    'name': 'ng',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
            ]
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message="Can't add new nodegroup. "
                                   "Cluster already has nodegroup "
                                   "with name 'ng'")

        data = {
            'add_node_groups': [
                {
                    'name': 'very-very-very-very-very-very-long-ng-name',
                    'flavor_id': '42',
                    'node_processes': ['namenode'],
                    'count': 10
                },
            ]
        }
        patchers = u.start_patch()
        self._assert_check_scaling(
            data=data,
            cluster=cluster,
            expected_message="Composite hostname test-cluster-very-"
            "very-very-very-very-very-long-ng-name-"
            "010.novalocal in provisioned cluster exceeds "
            "maximum limit 64 characters",
            expected_exception=ex.InvalidDataException)
        u.stop_patch(patchers)
Example #36
    def test_check_cluster_scaling_resize_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1],
                                    status=c_u.CLUSTER_STATUS_VALIDATING,
                                    id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster cannot be scaled "
                             "not in '" + c_u.CLUSTER_STATUS_ACTIVE +
            "' status. Cluster status: " +
            c_u.CLUSTER_STATUS_VALIDATING)

        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1], status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')
        data = {
            'resize_node_groups': [
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                }
            ],
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Cluster doesn't contain "
                             "node group with name 'a'")
        data.update({'resize_node_groups': [
            {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            },
            {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }
        ]})
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message=self.duplicates_detected,
            expected_exception=ex.InvalidDataException)
Example #37
    def test_check_cluster_scaling_add_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("test-cluster", "tenant1", "fake",
                                    "0.1",
                                    [ng1], status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')
        data = {
            'add_node_groups': [
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                }
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message=self.duplicates_detected,
            expected_exception=ex.InvalidDataException)
        data = {
            'add_node_groups': [
                {
                    'name': 'ng',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Can't add new nodegroup. "
                             "Cluster already has nodegroup "
                             "with name 'ng'")

        data = {
            'add_node_groups': [
                {
                    'name': 'very-very-very-very-very-very-long-ng-name',
                    'flavor_id': '42',
                    'node_processes': ['namenode'],
                    'count': 10
                },
            ]
        }
        patchers = u.start_patch()
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Composite hostname test-cluster-very-"
                             "very-very-very-very-very-long-ng-name-"
                             "010.novalocal in provisioned cluster exceeds "
                             "maximum limit 64 characters",
            expected_exception=ex.InvalidDataException)
        u.stop_patch(patchers)
Example #38
    def _create_cluster(self, *args, **kwargs):
        lst = []
        for i in range(0, len(args)):
            self.ng[i]['count'] = args[i]
            lst.append(self.ng[i])

        return tu.create_cluster("cluster1", "tenant1", "vanilla",
                                 "2.6.0", lst, **kwargs)
Example #39
    def _create_cluster(self, *args, **kwargs):
        lst = []
        for i in range(0, len(args)):
            self.ng[i]['count'] = args[i]
            lst.append(self.ng[i])

        return tu.create_cluster("cluster1", "tenant1", "vanilla", "2.7.1",
                                 lst, **kwargs)
Example #40
def get_fake_cluster_with_process(processes=None,
                                  provided_ng_list=None, **kwargs):
    processes = processes or {}
    provided_ng_list = provided_ng_list or []
    inst_counter = itertools.count(start=0)
    ng_counter = itertools.count(start=0)
    ng_id_counter = itertools.count(start=0)

    # default
    mng_ng = ('manager_ng', 1, ['CLOUDERA_MANAGER'], 1)

    mst_ng = ('master_ng', 1, ['HDFS_NAMENODE',
                               'HDFS_SECONDARYNAMENODE',
                               'YARN_RESOURCEMANAGER',
                               'YARN_JOBHISTORY',
                               ], 1)

    wkrs_ng = ('worker_ng', 1, ['HDFS_DATANODE',
                                'YARN_NODEMANAGER'], 3)

    basic_ng_list = [mng_ng, mst_ng, wkrs_ng]

    # if in default_ng_list, change it
    if 'CLOUDERA_MANAGER' in processes:
        if processes['CLOUDERA_MANAGER'] == 0:
            basic_ng_list.remove(mng_ng)
        else:
            processes['CLOUDERA_MANAGER'] -= 1

    # iterate over a copy: the loop removes entries from mst_ng[2]
    for process in list(mst_ng[2]):
        if process in processes:
            if processes[process] == 0:
                mst_ng[2].remove(process)
            else:
                processes[process] -= 1

    # only consider processes set to 0; again iterate over a copy
    for process in list(wkrs_ng[2]):
        if process in processes:
            if processes[process] == 0:
                wkrs_ng[2].remove(process)

    other_ng_list = []
    for process, count in six.iteritems(processes):
        if count:
            ng = ('service_ng{0}'.format(six.next(ng_counter)),
                  1, [process], count)
            other_ng_list.append(ng)

    ng_list = basic_ng_list + other_ng_list + provided_ng_list

    ng_dict_list = [make_ng_dict_with_inst(
        inst_counter, *args,
        id="ng_id{0}".format(six.next(ng_id_counter)))
        for args in ng_list]

    return tu.create_cluster('test_cluster', 1, 'cdh',
                             '5', ng_dict_list, **kwargs)
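A usage sketch for the builder above: setting a process count to 0 removes it from the default node groups, while a positive count for an unknown process adds a dedicated service node group. The 'HBASE_MASTER' name is illustrative, not taken from the code above:

cluster = get_fake_cluster_with_process(
    processes={'HDFS_SECONDARYNAMENODE': 0, 'HBASE_MASTER': 2})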
Example #41
def get_fake_cluster_with_process(processes=None,
                                  provided_ng_list=None, **kwargs):
    processes = processes or {}
    provided_ng_list = provided_ng_list or []
    inst_counter = itertools.count(start=0)
    ng_counter = itertools.count(start=0)
    ng_id_counter = itertools.count(start=0)

    # default
    mng_ng = ('manager_ng', 1, ['CLOUDERA_MANAGER'], 1)

    mst_ng = ('master_ng', 1, ['HDFS_NAMENODE',
                               'HDFS_SECONDARYNAMENODE',
                               'YARN_RESOURCEMANAGER',
                               'YARN_JOBHISTORY',
                               ], 1)

    wkrs_ng = ('worker_ng', 1, ['HDFS_DATANODE',
                                'YARN_NODEMANAGER'], 3)

    basic_ng_list = [mng_ng, mst_ng, wkrs_ng]

    # if in default_ng_list, change it
    if 'CLOUDERA_MANAGER' in processes:
        if processes['CLOUDERA_MANAGER'] == 0:
            basic_ng_list.remove(mng_ng)
        else:
            processes['CLOUDERA_MANAGER'] -= 1

    # iterate over a copy: the loop removes entries from mst_ng[2]
    for process in list(mst_ng[2]):
        if process in processes:
            if processes[process] == 0:
                mst_ng[2].remove(process)
            else:
                processes[process] -= 1

    # only consider processes set to 0; again iterate over a copy
    for process in list(wkrs_ng[2]):
        if process in processes:
            if processes[process] == 0:
                wkrs_ng[2].remove(process)

    other_ng_list = []
    for process, count in six.iteritems(processes):
        if count:
            ng = ('service_ng{0}'.format(six.next(ng_counter)),
                  1, [process], count)
            other_ng_list.append(ng)

    ng_list = basic_ng_list + other_ng_list + provided_ng_list

    ng_dict_list = [make_ng_dict_with_inst(
        inst_counter, *args,
        id="ng_id{0}".format(six.next(ng_id_counter)))
        for args in ng_list]

    return tu.create_cluster('test_cluster', 1, 'cdh',
                             '5', ng_dict_list, **kwargs)
Example #42
    def _validate_case(self, *args):
        lst = []
        for i in range(0, len(args)):
            self.ng[i]['count'] = args[i]
            lst.append(self.ng[i])

        cl = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1", lst)

        self.pl.validate(cl)
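Callers pass one instance count per node-group template stored in `self.ng`, in order; an illustrative invocation assuming three templates (for example a namenode, a jobtracker, and tasktrackers):

self._validate_case(1, 1, 10)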
Example #43
    def test_check_heat_cluster_scaling_missing_engine(self, engine_version):
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
                                    [ng1], status='Active', id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created before Juno release can't be "
                             "scaled with heat.1.1 engine")
Example #44
    def test_check_cluster_scaling_add_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("test-cluster", "tenant", "vanilla",
                                    "1.2.1", [ng1], status='Active',
                                    id='12321')
        data = {
            'add_node_groups': [
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                }
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message='Duplicates in node '
                             'group names are detected')
        data = {
            'add_node_groups': [
                {
                    'name': 'ng',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Can't add new nodegroup. "
                             "Cluster already has nodegroup "
                             "with name 'ng'")

        data = {
            'add_node_groups': [
                {
                    'name': 'very-very-very-very-very-very-long-ng-name',
                    'flavor_id': '42',
                    'node_processes': ['namenode'],
                    'count': 10
                },
            ]
        }
        patchers = u.start_patch()
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Composite hostname test-cluster-very-"
                             "very-very-very-very-very-long-ng-name-"
                             "010.novalocal in provisioned cluster exceeds "
                             "maximum limit 64 characters")
        u.stop_patch(patchers)
Example #45
    def test_check_cluster_scaling_resize_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
                                    [ng1], status='Validating', id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster cannot be scaled "
                             "not in 'Active' "
                             "status. Cluster status: "
                             "Validating")

        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
                                    [ng1], status='Active', id='12321')
        data = {
            'resize_node_groups': [
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                }
            ],
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Cluster doesn't contain "
                             "node group with name 'a'")
        data.update({'resize_node_groups': [
            {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            },
            {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }
        ]})
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message='Duplicates in node '
                             'group names are detected')
Example #46
    def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None,
                      domain_name=None):
        return tu.create_cluster("cluster", "tenant1", "general",
                                 "2.6.0", [ng1, ng2],
                                 user_keypair_id='user_key',
                                 neutron_management_network=mng_network,
                                 default_image_id='1', image_id=None,
                                 anti_affinity=anti_affinity or [],
                                 domain_name=domain_name,
                                 anti_affinity_ratio=1)
Example #47
    def test_check_heat_cluster_scaling_missing_engine(self, ops):
        ops.get_engine_type_and_version.return_value = "heat.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1], status='Active', id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created before Juno release can't be "
                             "scaled with heat.1.1 engine")
Example #48
    def test_check_cluster_scaling_missing_resource(self, ops, m_nova,
                                                    m_image):
        ops.get_engine_type_and_version.return_value = "heat.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)

        nova = mock.Mock()
        m_nova.return_value = nova
        nova.keypairs.get.side_effect = u._get_keypair
        cluster = tu.create_cluster(
            "cluster1",
            "tenant1",
            "fake",
            "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE,
            sahara_info={"infrastructure_engine": "heat.1.1"},
            id='12321',
            user_keypair_id='keypair')
        self._assert_check_scaling(
            data={},
            cluster=cluster,
            expected_exception=ex.NotFoundException,
            expected_message="Requested keypair 'keypair' not found")

        image = mock.Mock()
        m_image.return_value = image
        image.list_registered.return_value = [
            mock.Mock(id='image1'),
            mock.Mock(id='image2')
        ]
        cluster = tu.create_cluster(
            "cluster1",
            "tenant1",
            "fake",
            "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE,
            sahara_info={"infrastructure_engine": "heat.1.1"},
            id='12321',
            default_image_id='image_id',
            user_keypair_id='test_keypair')
        self._assert_check_scaling(
            data={},
            cluster=cluster,
            expected_message="Requested image 'image_id' is not registered")
Example #49
    def test_cluster_delete_when_protected(self, get_cluster_p):
        cluster = tu.create_cluster("cluster1", "tenant1", "fake",
                                    "0.1", ['ng1'], is_protected=True)
        get_cluster_p.return_value = cluster

        with testtools.ExpectedException(ex.DeletionFailed):
            try:
                c_val.check_cluster_delete(cluster.id)
            except ex.DeletionFailed as e:
                self.assert_protected_resource_exception(e)
                raise e
Example #50
    def test_check_heat_cluster_scaling_missing_engine(self, ops):
        ops.get_engine_type_and_version.return_value = "heat.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1], status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created before Juno release can't be "
                             "scaled with heat.1.1 engine")
Example #51
    def test_check_cluster_scaling_wrong_engine(self, engine_version):
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "vanilla", "1.2.1", [ng1],
            status='Active', id='12321',
            sahara_info={"infrastructure_engine": "heat.1.1"})

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created with heat.1.1 infrastructure "
                             "engine can't be scaled with direct.1.1 engine")
Example #52
    def test_public_cluster_delete_from_another_tenant(self, get_cluster_p):
        cluster = tu.create_cluster("cluster1", "tenant2", "fake",
                                    "0.1", ['ng1'], is_public=True)
        get_cluster_p.return_value = cluster

        with testtools.ExpectedException(ex.DeletionFailed):
            try:
                c_val.check_cluster_delete(cluster.id)
            except ex.DeletionFailed as e:
                self.assert_created_in_another_tenant_exception(e)
                raise e
Example #53
    def test_check_cluster_scaling_wrong_engine(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "fake", "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE, id='12321',
            sahara_info={"infrastructure_engine": "heat.1.1"})

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created with heat.1.1 infrastructure "
                             "engine can't be scaled with direct.1.1 engine")
Example #54
    def test_public_cluster_update_from_another_tenant(self, get_cluster_p):
        cluster = tu.create_cluster("cluster1", "tenant_2", "fake",
                                    "0.1", ['ng1'], is_public=True)
        get_cluster_p.return_value = cluster

        # cluster can't be updated from another tenant
        with testtools.ExpectedException(ex.UpdateFailedException):
            try:
                c_val.check_cluster_update(cluster.id, {'name': 'new'})
            except ex.UpdateFailedException as e:
                self.assert_created_in_another_tenant_exception(e)
                raise e
Example #55
    def test_load_template_with_anti_affinity_single_ng(self):
        """Checks Heat cluster template with Neutron enabled.

        Checks also anti-affinity feature enabled for single node process
        in single node group.
        """

        ng1 = tu.make_ng_dict(
            "master",
            42,
            ["namenode"],
            1,
            floating_ip_pool="floating",
            image_id=None,
            volumes_per_node=0,
            volumes_size=0,
            id=1,
        )
        ng2 = tu.make_ng_dict(
            "worker",
            42,
            ["datanode"],
            2,
            floating_ip_pool="floating",
            image_id=None,
            volumes_per_node=0,
            volumes_size=0,
            id=2,
        )
        cluster = tu.create_cluster(
            "cluster",
            "tenant1",
            "general",
            "1.2.1",
            [ng1, ng2],
            user_keypair_id="user_key",
            neutron_management_network="private_net",
            default_image_id="1",
            anti_affinity=["datanode"],
            image_id=None,
        )
        aa_heat_template = h.ClusterTemplate(cluster)
        aa_heat_template.add_node_group_extra(ng1["id"], 1, get_ud_generator("line1\nline2"))
        aa_heat_template.add_node_group_extra(ng2["id"], 2, get_ud_generator("line2\nline3"))

        self.override_config("use_neutron", True)
        main_template = h._load_template("main.heat", {"resources": aa_heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text("tests/unit/resources/" "test_serialize_resources_aa.heat")),
        )
Example #56
    def test_data_sources_differ(self, get_job, get_data_source, get_cluster):
        get_job.return_value = mock.Mock(
            type=edp.JOB_TYPE_MAPREDUCE_STREAMING, libs=[], interface=[])

        ds1_id = six.text_type(uuid.uuid4())
        ds2_id = six.text_type(uuid.uuid4())

        data_sources = {
            ds1_id: mock.Mock(type="swift", url="http://swift/test"),
            ds2_id: mock.Mock(type="swift", url="http://swift/test2"),
        }

        get_data_source.side_effect = lambda ctx, x: data_sources[x]

        ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "vanilla", "2.7.1", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"},
                    "params": {},
                    "job_execution_info": {},
                    "args": []}
            })

        data_sources[ds2_id].url = "http://swift/test"

        err_msg = ("Provided input and output DataSources reference the "
                   "same location: %s" % data_sources[ds2_id].url)

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"},
                    "params": {},
                    "job_execution_info": {},
                    "args": []}
            },
            bad_req_i=(1, "INVALID_DATA", err_msg))