Example 1
    def test_validate_scaling(self, vls, vla, get_by_id):
        self.vh.pctx['all_confs'] = [TestConfig('HDFS', 'dfs.replication', -1)]
        ng1 = testutils.make_ng_dict('ng1', '40', ['namenode'], 1)
        ng2 = testutils.make_ng_dict('ng2', '41', ['datanode'], 2)
        ng3 = testutils.make_ng_dict('ng3', '42', ['datanode'], 3)
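        # Hedged note: judging from the validate_scaling() calls below,
        # 'additional' lists the ids of node groups being added, while
        # 'existing' maps a node group id to its new instance count.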
        additional = [ng2['id'], ng3['id']]
        existing = {ng2['id']: 1}
        cluster = testutils.create_cluster('test-cluster', 'tenant1', 'fake',
                                           '0.1', [ng1, ng2, ng3])
        self.vh.validate_scaling(cluster, existing, additional)
        vla.assert_called_once_with(cluster, additional)
        vls.assert_called_once_with(self.vh.pctx, cluster, existing)

        ng4 = testutils.make_ng_dict('ng4', '43', ['datanode', 'zookeeper'], 3)
        ng5 = testutils.make_ng_dict('ng5', '44', ['datanode', 'zookeeper'], 1)
        existing = {ng4['id']: 2}
        additional = {ng5['id']}
        cluster = testutils.create_cluster('test-cluster', 'tenant1', 'fake',
                                           '0.1', [ng1, ng4])

        with testtools.ExpectedException(ex.ClusterCannotBeScaled):
            self.vh.validate_scaling(cluster, existing, {})

        get_by_id.return_value = r.NodeGroupResource(ng5)

        with testtools.ExpectedException(ex.ClusterCannotBeScaled):
            self.vh.validate_scaling(cluster, {}, additional)
Example 2
    def test_load_template_use_neutron(self):
        """This test checks Heat cluster template with Neutron enabled.
           Two NodeGroups used: 'master' with Ephemeral drive attached and
           'worker' with 2 attached volumes 10GB size each
        """

        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=1)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=2, volumes_size=10, id=2)
        cluster = tu.create_cluster("cluster", "tenant1", "general",
                                    "1.2.1", [ng1, ng2],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1', anti_affinity=[],
                                    image_id=None)
        heat_template = h.ClusterTemplate(cluster)
        heat_template.add_node_group_extra(ng1['id'], 1,
                                           get_ud_generator('line1\nline2'))
        heat_template.add_node_group_extra(ng2['id'], 1,
                                           get_ud_generator('line2\nline3'))

        self.override_config("use_neutron", True)
        main_template = h._load_template(
            'main.heat', {'resources':
                          heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text(
                "tests/unit/resources/"
                "test_serialize_resources_use_neutron.heat")))
Example 3
    def test_load_template_with_anti_affinity_single_ng(self):
        """This test checks Heat cluster template with Neutron enabled
           and anti-affinity feature enabled for single node process
           in single node group.
        """

        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=1)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 2,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=0, volumes_size=0, id=2)
        cluster = tu.create_cluster("cluster", "tenant1", "general",
                                    "1.2.1", [ng1, ng2],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1',
                                    anti_affinity=['datanode'], image_id=None)
        aa_heat_template = h.ClusterTemplate(cluster)
        aa_heat_template.add_node_group_extra(ng1['id'], 1,
                                              get_ud_generator('line1\nline2'))
        aa_heat_template.add_node_group_extra(ng2['id'], 2,
                                              get_ud_generator('line2\nline3'))

        self.override_config("use_neutron", True)
        main_template = h._load_template(
            'main.heat', {'resources':
                          aa_heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text(
                "tests/unit/resources/"
                "test_serialize_resources_aa.heat")))
Example 4
    def test_validate(self):
        plugin = p.IDHProvider()

        ng_mng = tu.make_ng_dict('mng', 'f1', ['manager'], 1)
        ng_nn = tu.make_ng_dict('nn', 'f1', ['namenode'], 1)
        ng_jt = tu.make_ng_dict('jt', 'f1', ['jobtracker'], 1)
        ng_dn = tu.make_ng_dict('dn', 'f1', ['datanode'], 2)
        ng_tt = tu.make_ng_dict('tt', 'f1', ['tasktracker'], 2)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_nn] + [ng_dn])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1', [ng_mng])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] * 2)
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_tt])
        self.assertRaises(g_ex.RequiredServiceMissingException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_jt] * 2 + [ng_tt])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)
Example 5
    def test_validate(self):
        plugin = p.IDHProvider()

        ng_mng = tu.make_ng_dict('mng', 'f1', ['manager'], 1)
        ng_nn = tu.make_ng_dict('nn', 'f1', ['namenode'], 1)
        ng_rm = tu.make_ng_dict('rm', 'f1', ['resourcemanager'], 1)
        ng_dn = tu.make_ng_dict('dn', 'f1', ['datanode'], 2)
        ng_nm = tu.make_ng_dict('nm', 'f1', ['nodemanager'], 2)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_nn] + [ng_dn])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2', [ng_mng])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_mng] + [ng_nn] * 2)
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_mng] + [ng_nn] + [ng_nm])
        self.assertRaises(g_ex.RequiredServiceMissingException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '3.0.2',
                               [ng_mng] + [ng_nn] + [ng_rm] * 2 + [ng_nm])
        self.assertRaises(g_ex.InvalidComponentCountException,
                          plugin.validate, cl)
Example 6
    def _make_node_groups(self, floating_ip_pool=None, volume_type=None):
        ng1 = tu.make_ng_dict('master',
                              42, ['namenode'],
                              1,
                              floating_ip_pool=floating_ip_pool,
                              image_id=None,
                              volumes_per_node=0,
                              volumes_size=0,
                              id="1",
                              image_username='******',
                              volume_type=None,
                              boot_from_volume=False,
                              auto_security_group=True)
        ng2 = tu.make_ng_dict('worker',
                              42, ['datanode'],
                              1,
                              floating_ip_pool=floating_ip_pool,
                              image_id=None,
                              volumes_per_node=2,
                              volumes_size=10,
                              id="2",
                              image_username='******',
                              volume_type=volume_type,
                              boot_from_volume=False,
                              auto_security_group=True)
        return ng1, ng2
Example 7
    def test_validate(self):
        plugin = p.IDHProvider()

        ng_mng = tu.make_ng_dict('mng', 'f1', ['manager'], 1)
        ng_nn = tu.make_ng_dict('nn', 'f1', ['namenode'], 1)
        ng_jt = tu.make_ng_dict('jt', 'f1', ['jobtracker'], 1)
        ng_dn = tu.make_ng_dict('dn', 'f1', ['datanode'], 2)
        ng_tt = tu.make_ng_dict('tt', 'f1', ['tasktracker'], 2)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_nn] + [ng_dn])
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1', [ng_mng])
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] * 2)
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_tt])
        self.assertRaises(g_ex.RequiredServiceMissingException,
                          plugin.validate, cl)

        cl = tu.create_cluster('cl1', 't1', 'intel', '2.5.1',
                               [ng_mng] + [ng_nn] + [ng_jt] * 2 + [ng_tt])
        self.assertRaises(g_ex.InvalidComponentCountException, plugin.validate,
                          cl)
Example 9
    def test_validate(self):
        self.ng = []
        self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
        self.ng.append(tu.make_ng_dict("ma", "f1", ["master"], 0))
        self.ng.append(tu.make_ng_dict("sl", "f1", ["slave"], 0))
        self.ng.append(tu.make_ng_dict("dn", "f1", ["datanode"], 0))

        self._validate_case(1, 1, 3, 3)
        self._validate_case(1, 1, 3, 4)
        self._validate_case(1, 1, 4, 3)

        with testtools.ExpectedException(pe.InvalidComponentCountException):
            self._validate_case(2, 1, 3, 3)

        with testtools.ExpectedException(pe.InvalidComponentCountException):
            self._validate_case(1, 2, 3, 3)

        with testtools.ExpectedException(pe.InvalidComponentCountException):
            self._validate_case(0, 1, 3, 3)

        with testtools.ExpectedException(pe.RequiredServiceMissingException):
            self._validate_case(1, 0, 3, 3)

        cl = self._create_cluster(
            1, 1, 3, 3, cluster_configs={'HDFS': {
                'dfs.replication': 4
            }})

        with testtools.ExpectedException(pe.InvalidComponentCountException):
            self.plugin.validate(cl)
Example 10
    def _make_node_groups(self, floating_ip_pool=None):
        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool=floating_ip_pool, image_id=None,
                              volumes_per_node=0, volumes_size=0, id=1)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                              floating_ip_pool=floating_ip_pool, image_id=None,
                              volumes_per_node=2, volumes_size=10, id=2)
        return ng1, ng2
Example 11
    def _make_node_groups(self, floating_ip_pool=None, volume_type=None):
        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool=floating_ip_pool, image_id=None,
                              volumes_per_node=0, volumes_size=0, id="1",
                              image_username='******', volume_type=None)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                              floating_ip_pool=floating_ip_pool, image_id=None,
                              volumes_per_node=2, volumes_size=10, id="2",
                              image_username='******', volume_type=volume_type)
        return ng1, ng2
Example 12
    def _make_node_groups(self, floating_ip_pool=None, volume_type=None):
        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool=floating_ip_pool, image_id=None,
                              volumes_per_node=0, volumes_size=0, id=1,
                              image_username='******', volume_type=None)
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                              floating_ip_pool=floating_ip_pool, image_id=None,
                              volumes_per_node=2, volumes_size=10, id=2,
                              image_username='******', volume_type=volume_type)
        return ng1, ng2
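
For context, a minimal sketch of how a test might consume this helper; the 'lvm' volume type and the asserted dict keys are illustrative assumptions inferred from the arguments above, not a documented schema.

    def test_volume_type_is_forwarded(self):
        # 'lvm' is a placeholder volume type used only for illustration.
        ng1, ng2 = self._make_node_groups(floating_ip_pool='floating',
                                          volume_type='lvm')
        # Assumption: make_ng_dict() copies keyword arguments into the
        # returned dict, as the other examples on this page suggest.
        self.assertEqual(2, ng2['volumes_per_node'])
        self.assertEqual('lvm', ng2['volume_type'])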
Example 13
    def test_load_template_with_anti_affinity_single_ng(self):
        """Checks Heat cluster template with Neutron enabled.

        Checks also anti-affinity feature enabled for single node process
        in single node group.
        """

        ng1 = tu.make_ng_dict(
            "master",
            42,
            ["namenode"],
            1,
            floating_ip_pool="floating",
            image_id=None,
            volumes_per_node=0,
            volumes_size=0,
            id=1,
        )
        ng2 = tu.make_ng_dict(
            "worker",
            42,
            ["datanode"],
            2,
            floating_ip_pool="floating",
            image_id=None,
            volumes_per_node=0,
            volumes_size=0,
            id=2,
        )
        cluster = tu.create_cluster(
            "cluster",
            "tenant1",
            "general",
            "1.2.1",
            [ng1, ng2],
            user_keypair_id="user_key",
            neutron_management_network="private_net",
            default_image_id="1",
            anti_affinity=["datanode"],
            image_id=None,
        )
        aa_heat_template = h.ClusterTemplate(cluster)
        aa_heat_template.add_node_group_extra(ng1["id"], 1, get_ud_generator("line1\nline2"))
        aa_heat_template.add_node_group_extra(ng2["id"], 2, get_ud_generator("line2\nline3"))

        self.override_config("use_neutron", True)
        main_template = h._load_template("main.heat", {"resources": aa_heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text("tests/unit/resources/" "test_serialize_resources_aa.heat")),
        )
Example 14
    def test_load_template_use_neutron(self):
        """This test checks Heat cluster template with Neutron enabled.
           Two NodeGroups used: 'master' with Ephemeral drive attached and
           'worker' with 2 attached volumes 10GB size each
        """

        ng1 = tu.make_ng_dict(
            "master",
            42,
            ["namenode"],
            1,
            floating_ip_pool="floating",
            image_id=None,
            volumes_per_node=0,
            volumes_size=0,
            id=1,
        )
        ng2 = tu.make_ng_dict(
            "worker",
            42,
            ["datanode"],
            1,
            floating_ip_pool="floating",
            image_id=None,
            volumes_per_node=2,
            volumes_size=10,
            id=2,
        )
        cluster = tu.create_cluster(
            "cluster",
            "tenant1",
            "general",
            "1.2.1",
            [ng1, ng2],
            user_keypair_id="user_key",
            neutron_management_network="private_net",
            default_image_id="1",
            anti_affinity=[],
            image_id=None,
        )
        heat_template = h.ClusterTemplate(cluster)
        heat_template.add_node_group_extra(ng1["id"], 1, get_ud_generator("line1\nline2"))
        heat_template.add_node_group_extra(ng2["id"], 1, get_ud_generator("line2\nline3"))

        self.override_config("use_neutron", True)
        main_template = h._load_template("main.heat", {"resources": heat_template._serialize_resources()})

        self.assertEqual(
            json.loads(main_template),
            json.loads(f.get_file_text("tests/unit/resources/" "test_serialize_resources_use_neutron.heat")),
        )
Example 15
    def setUp(self):
        super(TestUtils, self).setUp()
        self.plugin = p.VanillaProvider()

        self.ng_manager = tu.make_ng_dict(
            'mng', 'f1', ['manager'], 1,
            [tu.make_inst_dict('mng1', 'manager')])
        self.ng_namenode = tu.make_ng_dict(
            'nn', 'f1', ['namenode'], 1,
            [tu.make_inst_dict('nn1', 'namenode')])
        self.ng_jobtracker = tu.make_ng_dict(
            'jt', 'f1', ['jobtracker'], 1,
            [tu.make_inst_dict('jt1', 'jobtracker')])
        self.ng_datanode = tu.make_ng_dict('dn', 'f1', ['datanode'], 2, [
            tu.make_inst_dict('dn1', 'datanode-1'),
            tu.make_inst_dict('dn2', 'datanode-2')
        ])
        self.ng_tasktracker = tu.make_ng_dict('tt', 'f1', ['tasktracker'], 2, [
            tu.make_inst_dict('tt1', 'tasktracker-1'),
            tu.make_inst_dict('tt2', 'tasktracker-2')
        ])
        self.ng_oozie = tu.make_ng_dict('ooz1', 'f1', ['oozie'], 1,
                                        [tu.make_inst_dict('ooz1', 'oozie')])
        self.ng_hiveserver = tu.make_ng_dict(
            'hs', 'f1', ['hiveserver'], 1,
            [tu.make_inst_dict('hs1', 'hiveserver')])
        self.ng_secondarynamenode = tu.make_ng_dict(
            'snn', 'f1', ['secondarynamenode'], 1,
            [tu.make_inst_dict('snn1', 'secondarynamenode')])
Example 16
    def setUp(self):
        super(TestUtils, self).setUp()
        self.plugin = p.VanillaProvider()

        self.ng_manager = tu.make_ng_dict(
            'mng', 'f1', ['manager'], 1,
            [tu.make_inst_dict('mng1', 'manager')])
        self.ng_namenode = tu.make_ng_dict(
            'nn', 'f1', ['namenode'], 1,
            [tu.make_inst_dict('nn1', 'namenode')])
        self.ng_resourcemanager = tu.make_ng_dict(
            'jt', 'f1', ['resourcemanager'], 1,
            [tu.make_inst_dict('jt1', 'resourcemanager')])
        self.ng_datanode = tu.make_ng_dict(
            'dn', 'f1', ['datanode'], 2,
            [tu.make_inst_dict('dn1', 'datanode-1'),
             tu.make_inst_dict('dn2', 'datanode-2')])
        self.ng_nodemanager = tu.make_ng_dict(
            'tt', 'f1', ['nodemanager'], 2,
            [tu.make_inst_dict('tt1', 'nodemanager-1'),
             tu.make_inst_dict('tt2', 'nodemanager-2')])
        self.ng_oozie = tu.make_ng_dict(
            'ooz1', 'f1', ['oozie'], 1,
            [tu.make_inst_dict('ooz1', 'oozie')])
        self.ng_hiveserver = tu.make_ng_dict(
            'hs', 'f1', ['hiveserver'], 1,
            [tu.make_inst_dict('hs1', 'hiveserver')])
        self.ng_secondarynamenode = tu.make_ng_dict(
            'snn', 'f1', ['secondarynamenode'], 1,
            [tu.make_inst_dict('snn1', 'secondarynamenode')])
Example 17
def get_fake_cluster(**kwargs):
    mng = tu.make_inst_dict('id1', 'manager_inst', management_ip='1.2.3.4')
    mng_ng = tu.make_ng_dict('manager_ng', 1, ['MANAGER'], 1, [mng])

    mst = tu.make_inst_dict('id2', 'master_inst', management_ip='1.2.3.5')
    mst_ng = tu.make_ng_dict('master_ng', 1, ['NAMENODE', 'SECONDARYNAMENODE',
                                              'RESOURCEMANAGER', 'JOBHISTORY',
                                              'OOZIE_SERVER'], 1, [mst])

    wkrs = _get_workers()
    wkrs_ng = tu.make_ng_dict('worker_ng', 1, ['DATANODE', 'NODEMANAGER'],
                              len(wkrs), wkrs)
    return tu.create_cluster('test_cluster', 1, 'cdh', '5',
                             [mng_ng, mst_ng, wkrs_ng],
                             **kwargs)
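
A minimal usage sketch, assuming the **kwargs are forwarded unchanged to tu.create_cluster (e.g. to override cluster-level fields):

    cluster = get_fake_cluster(cluster_configs={})
    # The fake cluster carries three node groups: manager_ng (MANAGER),
    # master_ng (NAMENODE etc.) and worker_ng (DATANODE, NODEMANAGER).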
Example 18
    def _get_cluster(self):
        i1 = tu.make_inst_dict('id_1', 'instance_1', '1.1.1.1')
        master_proc = [
            yarn.RESOURCE_MANAGER.ui_name,
            yarn.NODE_MANAGER.ui_name,
            yarn.HISTORY_SERVER.ui_name,
            maprfs.CLDB.ui_name,
            maprfs.FILE_SERVER.ui_name,
            oozie.OOZIE.ui_name,
            management.ZOOKEEPER.ui_name,
        ]

        master_ng = tu.make_ng_dict('master', 'large', master_proc, 1, [i1])
        cluster_configs = {
            'Service': {
                'key': 'value',
                'Service Version': '1.1',
            },
        }

        cluster = tu.create_cluster(
            name='test_cluster',
            tenant='large',
            plugin='mapr',
            version='4.0.1.mrv1',
            node_groups=[master_ng],
            cluster_configs=cluster_configs,
        )
        self.ng = cluster.node_groups[0]
        self.instance = self.ng.instances[0]
        return cluster
Example 19
    def test_check_cluster_scaling_missing_resource(self, ops,
                                                    m_nova, m_image):
        ops.get_engine_type_and_version.return_value = "heat.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)

        nova = mock.Mock()
        m_nova.return_value = nova
        nova.keypairs.get.side_effect = u._get_keypair
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "fake", "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE,
            sahara_info={"infrastructure_engine": "heat.1.1"},
            id='12321', user_keypair_id='keypair')
        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_exception=ex.NotFoundException,
            expected_message="Requested keypair 'keypair' not found")

        image = mock.Mock()
        m_image.return_value = image
        image.list_registered.return_value = [mock.Mock(id='image1'),
                                              mock.Mock(id='image2')]
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "fake", "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE,
            sahara_info={"infrastructure_engine": "heat.1.1"},
            id='12321', default_image_id='image_id',
            user_keypair_id='test_keypair')
        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Requested image 'image_id' is not registered")
Example 20
    def test_check_cluster_scaling_add_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("test-cluster",
                                    "tenant",
                                    "vanilla",
                                    "1.2.1", [ng1],
                                    status='Active',
                                    id='12321')
        data = {
            'add_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }, {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }]
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message='Duplicates in node '
                                   'group names are detected',
                                   expected_exception=ex.InvalidDataException)
        data = {
            'add_node_groups': [
                {
                    'name': 'ng',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
            ]
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message="Can't add new nodegroup. "
                                   "Cluster already has nodegroup "
                                   "with name 'ng'")

        data = {
            'add_node_groups': [
                {
                    'name': 'very-very-very-very-very-very-long-ng-name',
                    'flavor_id': '42',
                    'node_processes': ['namenode'],
                    'count': 10
                },
            ]
        }
        patchers = u.start_patch()
        self._assert_check_scaling(
            data=data,
            cluster=cluster,
            expected_message="Composite hostname test-cluster-very-"
            "very-very-very-very-very-long-ng-name-"
            "010.novalocal in provisioned cluster exceeds "
            "maximum limit 64 characters",
            expected_exception=ex.InvalidDataException)
        u.stop_patch(patchers)
Example 21
    def test_edp_main_class_spark(self, job_get, cluster_get):
        job_get.return_value = mock.Mock(type=edp.JOB_TYPE_SPARK,
                                         interface=[])
        ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
                             instances=[tu.make_inst_dict('id', 'name')])
        cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "spark", "1.0.0", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {"configs": {},
                                "params": {},
                                "args": []}
            },
            bad_req_i=(1, "INVALID_DATA",
                          "%s job must "
                          "specify edp.java.main_class" % edp.JOB_TYPE_SPARK))

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.myclass"},
                    "params": {},
                    "args": []}
            })
Example 22
    def test_data_sources_differ(self, get_job, get_data_source, get_cluster):
        get_job.return_value = mock.Mock(type=edp.JOB_TYPE_MAPREDUCE_STREAMING,
                                         libs=[],
                                         interface=[])

        ds1_id = six.text_type(uuid.uuid4())
        ds2_id = six.text_type(uuid.uuid4())

        data_sources = {
            ds1_id: mock.Mock(type="swift", url="http://swift/test"),
            ds2_id: mock.Mock(type="swift", url="http://swift/test2"),
        }

        get_data_source.side_effect = lambda ctx, x: data_sources[x]

        ng = tu.make_ng_dict('master',
                             42, ['oozie'],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "vanilla", "2.6.0", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"
                    },
                    "params": {},
                    "args": []
                }
            })

        data_sources[ds2_id].url = "http://swift/test"

        err_msg = ("Provided input and output DataSources reference the "
                   "same location: %s" % data_sources[ds2_id].url)

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"
                    },
                    "params": {},
                    "args": []
                }
            },
            bad_req_i=(1, "INVALID_DATA", err_msg))
Example 23
    def test_get_hadoop_ssh_keys(self):
        cluster_dict = {
            'name': 'cluster1',
            'plugin_name': 'mock_plugin',
            'hadoop_version': 'mock_version',
            'default_image_id': 'initial',
            'node_groups': [tu.make_ng_dict("ng1", "f1", ["s1"], 1)]}

        cluster1 = conductor.cluster_create(context.ctx(), cluster_dict)
        (private_key1, public_key1) = c_h.get_hadoop_ssh_keys(cluster1)

        # should return the stored keys for the old cluster
        cluster1 = conductor.cluster_get(context.ctx(), cluster1)
        (private_key2, public_key2) = c_h.get_hadoop_ssh_keys(cluster1)

        self.assertEqual(public_key1, public_key2)
        self.assertEqual(private_key1, private_key2)

        # should generate new keys for the new cluster
        cluster_dict.update({'name': 'cluster2'})
        cluster2 = conductor.cluster_create(context.ctx(), cluster_dict)
        (private_key3, public_key3) = c_h.get_hadoop_ssh_keys(cluster2)

        self.assertNotEqual(public_key1, public_key3)
        self.assertNotEqual(private_key1, private_key3)
Example 24
    def _get_context(self):
        i1 = tu.make_inst_dict('id_1', 'instance_1', MANAGEMENT_IP)
        i1['internal_ip'] = INTERNAL_IP
        master_proc = [
            yarn.RESOURCE_MANAGER.ui_name,
            yarn.NODE_MANAGER.ui_name,
            yarn.HISTORY_SERVER.ui_name,
            maprfs.CLDB.ui_name,
            maprfs.FILE_SERVER.ui_name,
            oozie.OOZIE.ui_name,
            management.ZOOKEEPER.ui_name,
        ]
        master_ng = tu.make_ng_dict('master', 'large', master_proc, 1, [i1])
        cluster_configs = {
            'Service': {
                'key': 'value',
                'Service Version': '1.1',
            },
            'Oozie': {
                'Oozie Version': '4.2.0',
            }
        }
        cluster = tu.create_cluster(
            name='test_cluster',
            tenant='large',
            plugin='mapr',
            version='5.2.0.mrv2',
            node_groups=[master_ng],
            cluster_configs=cluster_configs,
        )
        self.ng = cluster.node_groups[0]
        self.instance = self.ng.instances[0]
        return cc.Context(cluster, handler.VersionHandler())
Example 25
    def test_check_edp_job_support_spark(self, get_job, get_cluster):
        # utils.start_patch will construct a vanilla cluster as a
        # default for get_cluster, but we want a Spark cluster.
        # So, we'll make our own.

        # Note that this means we cannot use assert_create_object_validation()
        # because it calls start_patch() and will override our setting
        job = mock.Mock(type=edp.JOB_TYPE_SPARK, mains=["main"], interface=[])
        get_job.return_value = job
        ng = tu.make_ng_dict('master',
                             42, [],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "spark", "2.2", [ng])

        # Everything is okay: a Spark cluster supports EDP by default
        # because the cluster requires a master and at least one slave
        wrap_it(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.class"
                    }
                }
            })
Example 26
    def test_edp_main_class_spark(self, job_get, cluster_get):
        job_get.return_value = mock.Mock(type=edp.JOB_TYPE_SPARK, interface=[])
        ng = tu.make_ng_dict('master',
                             42, ['namenode'],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "spark", "1.0.0", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {},
                    "params": {},
                    "args": []
                }
            },
            bad_req_i=(1, "INVALID_DATA", "%s job must "
                       "specify edp.java.main_class" % edp.JOB_TYPE_SPARK))

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.myclass"
                    },
                    "params": {},
                    "args": []
                }
            })
Example 27
    def _get_context(self):
        i1 = tu.make_inst_dict('id_1', 'instance_1', '1.1.1.1')
        master_proc = [
            yarn.RESOURCE_MANAGER.ui_name,
            yarn.NODE_MANAGER.ui_name,
            yarn.HISTORY_SERVER.ui_name,
            maprfs.CLDB.ui_name,
            maprfs.FILE_SERVER.ui_name,
            oozie.OOZIE.ui_name,
            management.ZOOKEEPER.ui_name,
        ]
        master_ng = tu.make_ng_dict('master', 'large', master_proc, 1, [i1])
        cluster_configs = {
            'Service': {
                'key': 'value',
                'Service Version': '1.1',
            },
        }
        cluster = tu.create_cluster(
            name='test_cluster',
            tenant='large',
            plugin='mapr',
            version='4.0.1.mrv1',
            node_groups=[master_ng],
            cluster_configs=cluster_configs,
        )
        self.ng = cluster.node_groups[0]
        self.instance = self.ng.instances[0]
        return cc.Context(cluster, handler.VersionHandler())
Example 28
    def test_check_cluster_scaling_add_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("test-cluster", "tenant1", "fake",
                                    "0.1",
                                    [ng1], status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')
        data = {
            'add_node_groups': [
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                }
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message=self.duplicates_detected,
            expected_exception=ex.InvalidDataException)
        data = {
            'add_node_groups': [
                {
                    'name': 'ng',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Can't add new nodegroup. "
                             "Cluster already has nodegroup "
                             "with name 'ng'")

        data = {
            'add_node_groups': [
                {
                    'name': 'very-very-very-very-very-very-long-ng-name',
                    'flavor_id': '42',
                    'node_processes': ['namenode'],
                    'count': 10
                },
            ]
        }
        patchers = u.start_patch()
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Composite hostname test-cluster-very-"
                             "very-very-very-very-very-long-ng-name-"
                             "010.novalocal in provisioned cluster exceeds "
                             "maximum limit 64 characters",
            expected_exception=ex.InvalidDataException)
        u.stop_patch(patchers)
Example 29
    def setUp(self):
        super(GeneralUtilsTest, self).setUp()
        i1 = tu.make_inst_dict("i1", "master")
        i2 = tu.make_inst_dict("i2", "worker1")
        i3 = tu.make_inst_dict("i3", "worker2")
        i4 = tu.make_inst_dict("i4", "worker3")
        i5 = tu.make_inst_dict("i5", "sn")

        ng1 = tu.make_ng_dict("master", "f1", ["jt", "nn"], 1, [i1])
        ng2 = tu.make_ng_dict("workers", "f1", ["tt", "dn"], 3, [i2, i3, i4])
        ng3 = tu.make_ng_dict("sn", "f1", ["dn"], 1, [i5])

        self.c1 = tu.create_cluster("cluster1", "tenant1", "general", "2.6.0", [ng1, ng2, ng3])

        self.ng1 = self.c1.node_groups[0]
        self.ng2 = self.c1.node_groups[1]
        self.ng3 = self.c1.node_groups[2]
Example 30
    def setUp(self):
        i1 = tu.make_inst_dict('i1', 'master')
        i2 = tu.make_inst_dict('i2', 'worker1')
        i3 = tu.make_inst_dict('i3', 'worker2')
        i4 = tu.make_inst_dict('i4', 'worker3')
        i5 = tu.make_inst_dict('i5', 'sn')

        ng1 = tu.make_ng_dict("master", "f1", ["jt", "nn"], 1, [i1])
        ng2 = tu.make_ng_dict("workers", "f1", ["tt", "dn"], 3,
                              [i2, i3, i4])
        ng3 = tu.make_ng_dict("sn", "f1", ["dn"], 1, [i5])

        self.c1 = tu.create_cluster("cluster1", "tenant1", "general", "1.2.1",
                                    [ng1, ng2, ng3])

        self.ng1 = self.c1.node_groups[0]
        self.ng2 = self.c1.node_groups[1]
        self.ng3 = self.c1.node_groups[2]
Example 31
    def test_check_cluster_scaling_add_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("test-cluster", "tenant", "vanilla",
                                    "1.2.1", [ng1], status='Active',
                                    id='12321')
        data = {
            'add_node_groups': [
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                }
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message='Duplicates in node '
                             'group names are detected')
        data = {
            'add_node_groups': [
                {
                    'name': 'ng',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                },
            ]
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Can't add new nodegroup. "
                             "Cluster already has nodegroup "
                             "with name 'ng'")

        data = {
            'add_node_groups': [
                {
                    'name': 'very-very-very-very-very-very-long-ng-name',
                    'flavor_id': '42',
                    'node_processes': ['namenode'],
                    'count': 10
                },
            ]
        }
        patchers = u.start_patch()
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Composite hostname test-cluster-very-"
                             "very-very-very-very-very-long-ng-name-"
                             "010.novalocal in provisioned cluster exceeds "
                             "maximum limit 64 characters")
        u.stop_patch(patchers)
Example 32
    def setUp(self):
        super(GeneralUtilsTest, self).setUp()
        i1 = tu.make_inst_dict('i1', 'master')
        i2 = tu.make_inst_dict('i2', 'worker1')
        i3 = tu.make_inst_dict('i3', 'worker2')
        i4 = tu.make_inst_dict('i4', 'worker3')
        i5 = tu.make_inst_dict('i5', 'sn')

        ng1 = tu.make_ng_dict("master", "f1", ["jt", "nn"], 1, [i1])
        ng2 = tu.make_ng_dict("workers", "f1", ["tt", "dn"], 3, [i2, i3, i4])
        ng3 = tu.make_ng_dict("sn", "f1", ["dn"], 1, [i5])

        self.c1 = tu.create_cluster("cluster1", "tenant1", "general", "2.6.0",
                                    [ng1, ng2, ng3])

        self.ng1 = self.c1.node_groups[0]
        self.ng2 = self.c1.node_groups[1]
        self.ng3 = self.c1.node_groups[2]
Example 33
    def test_check_heat_cluster_scaling_missing_engine(self, engine_version):
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
                                    [ng1], status='Active', id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created before Juno release can't be "
                             "scaled with heat.1.1 engine")
Example 34
    def test_validate(self):
        self.ng = []
        self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
        self.ng.append(tu.make_ng_dict("sn", "f1", ["secondarynamenode"], 0))
        self.ng.append(tu.make_ng_dict("jt", "f1", ["resourcemanager"], 0))
        self.ng.append(tu.make_ng_dict("tt", "f1", ["nodemanager"], 0))
        self.ng.append(tu.make_ng_dict("dn", "f1", ["datanode"], 0))
        self.ng.append(tu.make_ng_dict("hs", "f1", ["historyserver"], 0))
        self.ng.append(tu.make_ng_dict("oo", "f1", ["oozie"], 0))

        self._validate_case(1, 1, 1, 10, 10, 0, 0)
        self._validate_case(1, 0, 1, 1, 4, 0, 0)
        self._validate_case(1, 1, 1, 0, 3, 0, 0)
        self._validate_case(1, 0, 1, 0, 3, 0, 0)
        self._validate_case(1, 1, 0, 0, 3, 0, 0)
        self._validate_case(1, 0, 1, 1, 3, 1, 1)
        self._validate_case(1, 1, 1, 1, 3, 1, 0)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(0, 0, 1, 10, 3, 0, 0)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(2, 0, 1, 10, 3, 0, 0)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 2, 1, 1, 3, 1, 1)

        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 0, 10, 3, 0, 0)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 0, 2, 10, 3, 0, 0)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 0, 1, 1, 3, 2, 1)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 0, 1, 1, 3, 1, 2)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 1, 1, 0, 2, 0, 0)
        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 1, 1, 3, 0, 1)
        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 1, 0, 3, 1, 1)
        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 1, 1, 0, 1, 1)

        cl = self._create_cluster(
            1,
            1,
            1,
            0,
            3,
            0,
            0,
            cluster_configs={'HDFS': {
                'dfs.replication': 4
            }})

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self.pl.validate(cl)
Example 35
    def test_check_heat_cluster_scaling_missing_engine(self, ops):
        ops.get_engine_type_and_version.return_value = "heat.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1], status='Active', id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created before Juno release can't be "
                             "scaled with heat.1.1 engine")
Example 36
    def test_check_cluster_scaling_wrong_engine(self, engine_version):
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "vanilla", "1.2.1", [ng1],
            status='Active', id='12321',
            sahara_info={"infrastructure_engine": "heat.1.1"})

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created with heat.1.1 infrastructure "
                             "engine can't be scaled with direct.1.1 engine")
Example 37
    def test_check_heat_cluster_scaling_missing_engine(self, ops):
        ops.get_engine_type_and_version.return_value = "heat.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1], status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created before Juno release can't be "
                             "scaled with heat.1.1 engine")
Example 38
    def test_check_cluster_scaling_wrong_engine(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster(
            "cluster1", "tenant1", "fake", "0.1", [ng1],
            status=c_u.CLUSTER_STATUS_ACTIVE, id='12321',
            sahara_info={"infrastructure_engine": "heat.1.1"})

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster created with heat.1.1 infrastructure "
                             "engine can't be scaled with direct.1.1 engine")
Example 39
    def test_streaming(self, get_job, get_data_source, get_cluster):
        get_job.return_value = mock.Mock(type=edp.JOB_TYPE_MAPREDUCE_STREAMING,
                                         libs=[],
                                         interface=[])

        ds1_id = uuidutils.generate_uuid()
        ds2_id = uuidutils.generate_uuid()

        data_sources = {
            ds1_id: mock.Mock(type="swift", url="http://swift/test"),
            ds2_id: mock.Mock(type="swift", url="http://swift/test2"),
        }

        get_data_source.side_effect = lambda ctx, x: data_sources[x]

        ng = tu.make_ng_dict('master',
                             42, ['oozie'],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "vanilla", "2.7.1", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {},
                    "params": {},
                    "args": [],
                    "job_execution_info": {}
                }
            },
            bad_req_i=(1, "INVALID_DATA", "MapReduce.Streaming job "
                       "must specify streaming mapper "
                       "and reducer"))

        self._assert_create_object_validation(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"
                    },
                    "params": {},
                    "job_execution_info": {},
                    "args": []
                }
            })
Example 40
def make_ng_dict(name,
                 flavor,
                 processes,
                 count,
                 instances=None,
                 volumes_size=None,
                 node_configs=None,
                 resource=False,
                 **kwargs):
    return testutils.make_ng_dict(name, flavor, processes, count, instances,
                                  volumes_size, node_configs, resource,
                                  **kwargs)
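
By construction, this wrapper only fills in positional defaults before delegating, so the two calls sketched below should be equivalent:

    ng = make_ng_dict('worker', 'f1', ['datanode'], 2, resource=True)
    # ...delegates to:
    # testutils.make_ng_dict('worker', 'f1', ['datanode'], 2,
    #                        None, None, None, True)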
Example 41
    def test_data_sources_differ(self, get_job, get_data_source, get_cluster):
        get_job.return_value = mock.Mock(
            type=edp.JOB_TYPE_MAPREDUCE_STREAMING, libs=[], interface=[])

        ds1_id = six.text_type(uuid.uuid4())
        ds2_id = six.text_type(uuid.uuid4())

        data_sources = {
            ds1_id: mock.Mock(type="swift", url="http://swift/test"),
            ds2_id: mock.Mock(type="swift", url="http://swift/test2"),
        }

        get_data_source.side_effect = lambda ctx, x: data_sources[x]

        ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "vanilla", "2.7.1", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"},
                    "params": {},
                    "job_execution_info": {},
                    "args": []}
            })

        data_sources[ds2_id].url = "http://swift/test"

        err_msg = ("Provided input and output DataSources reference the "
                   "same location: %s" % data_sources[ds2_id].url)

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"},
                    "params": {},
                    "job_execution_info": {},
                    "args": []}
            },
            bad_req_i=(1, "INVALID_DATA", err_msg))
Example 42
def make_ng_dict_with_inst(counter, name, flavor, processes, count,
                           instances=None, volumes_size=None,
                           node_configs=None, **kwargs):
    if not instances:
        instances = []
        for i in range(count):
            n = six.next(counter)
            instance = tu.make_inst_dict("id{0}".format(n),
                                         "fake_inst{0}".format(n),
                                         management_ip='1.2.3.{0}'.format(n))
            instances.append(instance)
    return tu.make_ng_dict(name, flavor, processes, count, instances,
                           volumes_size, node_configs, **kwargs)
Example 43
def make_ng_dict_with_inst(counter, name, flavor,
                           processes, count, instances=None,
                           **kwargs):
    if not instances:
        instances = []
        for i in range(count):
            n = six.next(counter)
            instance = tu.make_inst_dict("id{0}".format(n),
                                         "fake_inst{0}".format(n),
                                         management_ip='1.2.3.{0}'.format(n))
            instances.append(instance)
    return tu.make_ng_dict(name, flavor, processes,
                           count, instances, **kwargs)
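
A minimal usage sketch that applies to either variant above, assuming counter is any iterator of unique integers (six.next simply calls next() on it):

    import itertools

    counter = itertools.count(1)
    ng = make_ng_dict_with_inst(counter, 'worker', 'f1', ['datanode'], 2)
    # Auto-generates instances id1/fake_inst1 (1.2.3.1) and
    # id2/fake_inst2 (1.2.3.2) and forwards them to tu.make_ng_dict.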
Example 44
    def test_check_cluster_scaling_resize_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "vanilla",
                                    "1.2.1", [ng1],
                                    status='Validating',
                                    id='12321')

        self._assert_check_scaling(data={},
                                   cluster=cluster,
                                   expected_message="Cluster cannot be scaled "
                                   "not in 'Active' "
                                   "status. Cluster status: "
                                   "Validating")

        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "vanilla",
                                    "1.2.1", [ng1],
                                    status='Active',
                                    id='12321')
        data = {
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }],
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message="Cluster doesn't contain "
                                   "node group with name 'a'")
        data.update({
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }, {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }]
        })
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message='Duplicates in node '
                                   'group names are detected',
                                   expected_exception=ex.InvalidDataException)
Example no. 45
    def test_validate(self):
        self.ng = []
        self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
        self.ng.append(tu.make_ng_dict("jt", "f1", ["jobtracker"], 0))
        self.ng.append(tu.make_ng_dict("tt", "f1", ["tasktracker"], 0))
        self.ng.append(tu.make_ng_dict("oozie", "f1", ["oozie"], 0))

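        # The positional arguments of _validate_case are instance counts for
        # the node groups above, in order: namenode, jobtracker, tasktracker,
        # oozie.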
        self._validate_case(1, 1, 10, 1)

        with self.assertRaises(ex.InvalidComponentCountException):
            self._validate_case(0, 1, 10, 1)
        with self.assertRaises(ex.InvalidComponentCountException):
            self._validate_case(2, 1, 10, 1)

        with self.assertRaises(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 10, 1)
        with self.assertRaises(ex.InvalidComponentCountException):
            self._validate_case(1, 2, 10, 1)

        with self.assertRaises(ex.InvalidComponentCountException):
            self._validate_case(1, 1, 0, 2)
        with self.assertRaises(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 0, 1)
Example no. 46
    def test_edp_main_class_java(self, job_get, cluster_get):
        job_get.return_value = mock.Mock(type=edp.JOB_TYPE_JAVA, interface=[])
        ng = tu.make_ng_dict('master',
                             42, ['namenode', 'oozie'],
                             1,
                             instances=[tu.make_inst_dict('id', 'name')])
        cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "vanilla", "2.7.1", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "job_configs": {
                    "configs": {},
                    "params": {},
                    "args": [],
                    "job_execution_info": {}
                }
            },
            bad_req_i=(1, "INVALID_DATA", "%s job must "
                       "specify edp.java.main_class" % edp.JOB_TYPE_JAVA))

        self._assert_create_object_validation(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": ""
                    },
                    "params": {},
                    "args": [],
                    "job_execution_info": {}
                }
            },
            bad_req_i=(1, "INVALID_DATA", "%s job must "
                       "specify edp.java.main_class" % edp.JOB_TYPE_JAVA))

        self._assert_create_object_validation(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.myclass"
                    },
                    "params": {},
                    "job_execution_info": {},
                    "args": []
                }
            })
    def test_check_cluster_scaling_resize_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "fake",
                                    "0.1", [ng1],
                                    status=c_u.CLUSTER_STATUS_VALIDATING,
                                    id='12321')

        self._assert_check_scaling(data={},
                                   cluster=cluster,
                                   expected_message="Cluster cannot be scaled "
                                   "not in '" + c_u.CLUSTER_STATUS_ACTIVE +
                                   "' status. Cluster status: " +
                                   c_u.CLUSTER_STATUS_VALIDATING)

        cluster = tu.create_cluster("cluster1",
                                    "tenant1",
                                    "fake",
                                    "0.1", [ng1],
                                    status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')
        data = {
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }],
        }
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message="Cluster doesn't contain "
                                   "node group with name 'a'")
        data.update({
            'resize_node_groups': [{
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }, {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }]
        })
        self._assert_check_scaling(data=data,
                                   cluster=cluster,
                                   expected_message=self.duplicates_detected,
                                   expected_exception=ex.InvalidDataException)
Example no. 48
    def _make_node_groups(self, floating_ip_pool=None):
        ng1 = tu.make_ng_dict(
            "master",
            42,
            ["namenode"],
            1,
            floating_ip_pool=floating_ip_pool,
            image_id=None,
            volumes_per_node=0,
            volumes_size=0,
            id=1,
        )
        ng2 = tu.make_ng_dict(
            "worker",
            42,
            ["datanode"],
            1,
            floating_ip_pool=floating_ip_pool,
            image_id=None,
            volumes_per_node=2,
            volumes_size=10,
            id=2,
        )
        return ng1, ng2
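For illustration, a hypothetical call site from inside a test method on the same class, exercising the floating-IP variant:

        # builds 'master' (no volumes) and 'worker' (2 x 10GB volumes) groups
        ng1, ng2 = self._make_node_groups(floating_ip_pool='floating')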
Example no. 49
    def test_load_template_with_volume_local_to_instance(self):
        """Checks Heat cluster template with Neutron enabled.

        Two NodeGroups used: 'master' with disabled volume_local_to_instance
        and 'worker' with enabled volume_local_to_instance.
        """
        ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=1, volumes_size=10, id=1,
                              volume_type=None, image_username='******')
        ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                              floating_ip_pool='floating', image_id=None,
                              volumes_per_node=2, volumes_size=10, id=2,
                              image_username='******', volume_type='vol_type',
                              volume_local_to_instance=True)

        cluster = tu.create_cluster("cluster", "tenant1", "general",
                                    "1.2.1", [ng1, ng2],
                                    user_keypair_id='user_key',
                                    neutron_management_network='private_net',
                                    default_image_id='1', image_id=None,
                                    anti_affinity=[])
        heat_template = h.ClusterTemplate(cluster)
        heat_template.add_node_group_extra(ng1['id'], 1,
                                           get_ud_generator('line1\nline2'))
        heat_template.add_node_group_extra(ng2['id'], 1,
                                           get_ud_generator('line2\nline3'))

        self.override_config("use_neutron", True)
        main_template = heat_template._get_main_template()

        self.assertEqual(
            json.loads(f.get_file_text(
                "tests/unit/resources/"
                "test_serialize_resources_volume_local_to_instance.heat")),
            json.loads(main_template))
Example no. 50
    def test_validate(self):
        self.ng = []
        self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
        self.ng.append(tu.make_ng_dict("sn", "f1", ["secondarynamenode"], 0))
        self.ng.append(tu.make_ng_dict("jt", "f1", ["resourcemanager"], 0))
        self.ng.append(tu.make_ng_dict("tt", "f1", ["nodemanager"], 0))
        self.ng.append(tu.make_ng_dict("dn", "f1", ["datanode"], 0))
        self.ng.append(tu.make_ng_dict("hs", "f1", ["historyserver"], 0))
        self.ng.append(tu.make_ng_dict("oo", "f1", ["oozie"], 0))

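        # The positional arguments of _validate_case are instance counts for
        # the node groups above, in order: namenode, secondarynamenode,
        # resourcemanager, nodemanager, datanode, historyserver, oozie.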
        self._validate_case(1, 1, 1, 10, 10, 0, 0)
        self._validate_case(1, 0, 1, 1, 4, 0, 0)
        self._validate_case(1, 1, 1, 0, 3, 0, 0)
        self._validate_case(1, 0, 1, 0, 3, 0, 0)
        self._validate_case(1, 1, 0, 0, 3, 0, 0)
        self._validate_case(1, 0, 1, 1, 3, 1, 1)
        self._validate_case(1, 1, 1, 1, 3, 1, 0)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(0, 0, 1, 10, 3, 0, 0)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(2, 0, 1, 10, 3, 0, 0)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 2, 1, 1, 3, 1, 1)

        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 0, 10, 3, 0, 0)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 0, 2, 10, 3, 0, 0)

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 0, 1, 1, 3, 2, 1)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 0, 1, 1, 3, 1, 2)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 1, 1, 0, 2, 0, 0)
        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 1, 1, 3, 0, 1)
        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 1, 0, 3, 1, 1)
        with testtools.ExpectedException(ex.RequiredServiceMissingException):
            self._validate_case(1, 0, 1, 1, 0, 1, 1)

        cl = self._create_cluster(
            1, 1, 1, 0, 3, 0, 0,
            cluster_configs={'HDFS': {'dfs.replication': 4}})

        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self.pl.validate(cl)
Example no. 51
    def test_edp_main_class_java(self, job_get, cluster_get):
        job_get.return_value = mock.Mock(type=edp.JOB_TYPE_JAVA, interface=[])
        ng = tu.make_ng_dict("master", 42, ["namenode", "oozie"], 1,
                             instances=[tu.make_inst_dict("id", "name")])
        cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "vanilla", "2.6.0", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {"configs": {}, "params": {}, "args": []}
            },
            bad_req_i=(1, "INVALID_DATA", "%s job must "
                       "specify edp.java.main_class" % edp.JOB_TYPE_JAVA))

        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {"edp.java.main_class": "org.me.myclass"},
                    "params": {},
                    "args": []
                }
            })
    def test_check_cluster_scaling_resize_ng(self, ops):
        ops.get_engine_type_and_version.return_value = "direct.1.1"
        ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1],
                                    status=c_u.CLUSTER_STATUS_VALIDATING,
                                    id='12321')

        self._assert_check_scaling(
            data={}, cluster=cluster,
            expected_message="Cluster cannot be scaled "
                             "not in '" + c_u.CLUSTER_STATUS_ACTIVE +
            "' status. Cluster status: " +
            c_u.CLUSTER_STATUS_VALIDATING)

        cluster = tu.create_cluster("cluster1", "tenant1", "fake", "0.1",
                                    [ng1], status=c_u.CLUSTER_STATUS_ACTIVE,
                                    id='12321')
        data = {
            'resize_node_groups': [
                {
                    'name': 'a',
                    'flavor_id': '42',
                    'node_processes': ['namenode']
                }
            ],
        }
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message="Cluster doesn't contain "
                             "node group with name 'a'")
        data.update({'resize_node_groups': [
            {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            },
            {
                'name': 'a',
                'flavor_id': '42',
                'node_processes': ['namenode']
            }
        ]})
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message=self.duplicates_detected,
            expected_exception=ex.InvalidDataException)
Example no. 53
    def test_streaming(self, get_job, get_data_source, get_cluster):
        get_job.return_value = mock.Mock(
            type=edp.JOB_TYPE_MAPREDUCE_STREAMING, libs=[], interface=[])

        ds1_id = uuidutils.generate_uuid()
        ds2_id = uuidutils.generate_uuid()

        data_sources = {
            ds1_id: mock.Mock(type="swift", url="http://swift/test"),
            ds2_id: mock.Mock(type="swift", url="http://swift/test2"),
        }

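        # Hand back the matching fake data source whenever validation
        # resolves an id.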
        get_data_source.side_effect = lambda ctx, x: data_sources[x]

        ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                             instances=[tu.make_inst_dict('id', 'name')])
        get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
                                                     "fake", "0.1", [ng])

        self._assert_create_object_validation(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {"configs": {},
                                "params": {},
                                "args": [],
                                "job_execution_info": {}}
            },
            bad_req_i=(1, "INVALID_DATA",
                          "MapReduce.Streaming job "
                          "must specify streaming mapper "
                          "and reducer"))

        self._assert_create_object_validation(
            data={
                "cluster_id": uuidutils.generate_uuid(),
                "input_id": ds1_id,
                "output_id": ds2_id,
                "job_configs": {
                    "configs": {
                        "edp.streaming.mapper": "/bin/cat",
                        "edp.streaming.reducer": "/usr/bin/wc"},
                    "params": {},
                    "job_execution_info": {},
                    "args": []}
            })