Example 1
0
    def _set_cluster_info(self, cluster):
        """Publish service web endpoints on the cluster record.

        Collects the Web UI URLs for the JobTracker, NameNode and Oozie
        (when those processes exist in the cluster) and persists the
        resulting mapping via the conductor under the 'info' key.
        """
        namenode = utils.get_namenode(cluster)
        jobtracker = utils.get_jobtracker(cluster)
        oozie = utils.get_oozie(cluster)

        info = {}

        if jobtracker:
            addr = c_helper.get_config_value(
                'MapReduce', 'mapred.job.tracker.http.address', cluster)
            # Configured value is "host:port"; keep only the port part.
            ui_port = addr[addr.rfind(':') + 1:]
            info['MapReduce'] = {
                'Web UI': 'http://%s:%s' % (jobtracker.management_ip, ui_port)
            }

        if namenode:
            addr = c_helper.get_config_value('HDFS', 'dfs.http.address',
                                             cluster)
            ui_port = addr[addr.rfind(':') + 1:]
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (namenode.management_ip, ui_port)
            }

        if oozie:
            info['JobFlow'] = {
                'Oozie': 'http://%s:11000' % oozie.management_ip
            }

        conductor.cluster_update(context.ctx(), cluster, {'info': info})
Example 2
0
    def _set_cluster_info(self, cluster):
        """Persist service endpoint info for the cluster.

        Builds a mapping of service name to endpoint URLs for the
        JobTracker, NameNode and Oozie (each only when present) and
        stores it on the cluster record through the conductor.
        """
        namenode = utils.get_namenode(cluster)
        jobtracker = utils.get_jobtracker(cluster)
        oozie = utils.get_oozie(cluster)

        info = {}

        if jobtracker:
            address = c_helper.get_config_value(
                'MapReduce', 'mapred.job.tracker.http.address', cluster)
            # Strip everything up to (and including) the last colon to
            # isolate the port from the configured "host:port" value.
            web_port = address[address.rfind(':') + 1:]
            info['MapReduce'] = {
                'Web UI': 'http://%s:%s' % (jobtracker.management_ip, web_port)
            }
            #TODO(aignatov) change from hardcode value
            info['MapReduce']['JobTracker'] = '%s:8021' % jobtracker.hostname()

        if namenode:
            address = c_helper.get_config_value(
                'HDFS', 'dfs.http.address', cluster)
            web_port = address[address.rfind(':') + 1:]
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (namenode.management_ip, web_port)
            }
            #TODO(aignatov) change from hardcode value
            info['HDFS']['NameNode'] = 'hdfs://%s:8020' % namenode.hostname()

        if oozie:
            info['JobFlow'] = {
                'Oozie': 'http://%s:11000' % oozie.management_ip
            }

        conductor.cluster_update(context.ctx(), cluster, {'info': info})
Example 3
0
    def _set_cluster_info(self, cluster):
        """Record the web endpoints of the cluster's Hadoop services.

        For each of JobTracker, NameNode and Oozie that exists in the
        cluster, derive its Web UI URL and save the collected mapping on
        the cluster via the conductor.
        """
        nn_instance = utils.get_namenode(cluster)
        jt_instance = utils.get_jobtracker(cluster)
        oozie_instance = utils.get_oozie(cluster)

        info = {}

        if jt_instance:
            http_addr = c_helper.get_config_value(
                'MapReduce', 'mapred.job.tracker.http.address', cluster)
            # Take the port portion of the configured "host:port" string.
            port = http_addr[http_addr.rfind(':') + 1:]
            info['MapReduce'] = {
                'Web UI': 'http://%s:%s' % (jt_instance.management_ip, port)
            }

        if nn_instance:
            http_addr = c_helper.get_config_value(
                'HDFS', 'dfs.http.address', cluster)
            port = http_addr[http_addr.rfind(':') + 1:]
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn_instance.management_ip, port)
            }

        if oozie_instance:
            info['JobFlow'] = {
                'Oozie': 'http://%s:11000' % oozie_instance.management_ip
            }

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
Example 4
0
    def test_get_config_value(self, cond_get_cluster):
        """Check lookup precedence and the error path of get_config_value."""
        fake_cluster = self._get_fake_cluster()
        cond_get_cluster.return_value = fake_cluster

        # Cluster-level override.
        self.assertEqual(
            'hdfs://inst1:8020',
            c_h.get_config_value('HDFS', 'fs.default.name', fake_cluster))
        # Custom key configured on the cluster.
        self.assertEqual(
            'eggs', c_h.get_config_value('HDFS', 'spam', fake_cluster))
        # Plugin default when no cluster is given.
        self.assertEqual(
            30000, c_h.get_config_value('HDFS', 'dfs.safemode.extension'))
        # Unknown key with no default raises.
        self.assertRaises(RuntimeError,
                          c_h.get_config_value,
                          'MapReduce', 'spam', fake_cluster)
Example 5
0
    def _validate_existing_ng_scaling(self, cluster, existing):
        """Validate a scaling request for existing node groups.

        Rejects the request if a node group contains a process the
        plugin cannot scale, or if shrinking datanodes would leave
        fewer datanodes than the configured HDFS replication factor.
        """
        scalable_processes = self._get_scalable_processes()

        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id not in existing:
                continue
            target_count = existing[ng.id]
            # Count datanodes scheduled for removal by this request.
            if ng.count > target_count and "datanode" in ng.node_processes:
                dn_to_delete += ng.count - target_count
            if not set(ng.node_processes).issubset(scalable_processes):
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, "Vanilla plugin cannot scale nodegroup"
                             " with processes: " +
                             ' '.join(ng.node_processes))

        dn_amount = len(utils.get_datanodes(cluster))
        rep_factor = c_helper.get_config_value('HDFS', 'dfs.replication',
                                               cluster)

        # Shrinking below the replication factor would break replica
        # placement, so refuse the whole scaling request.
        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name, "Vanilla plugin cannot shrink cluster because "
                              "it would be not enough nodes for replicas "
                              "(replication factor is %s)" % rep_factor)
Example 6
0
    def test_get_config_value(self, cond_get_cluster):
        """Exercise config lookup at cluster, node-group and default level."""
        class FakeInst(object):
            def __init__(self, inst_name, inst_id):
                self.instance_name = inst_name
                self.instance_id = inst_id

            def hostname(self):
                return self.instance_name

        class FakeNG(object):
            def __init__(self, name, flavor, processes, count, instances=None,
                         configuration=None, cluster_id=None):
                self.name = name
                self.flavor = flavor
                self.node_processes = processes
                self.count = count
                self.instances = instances or []
                self.ng_configuration = configuration
                self.cluster_id = cluster_id

            def configuration(self):
                return self.ng_configuration

            def storage_paths(self):
                return ['/mnt']

        class FakeCluster(object):
            def __init__(self, name, tenant, plugin, version, node_groups):
                self.name = name
                self.tenant = tenant
                self.plugin = plugin
                self.version = version
                self.node_groups = node_groups

        master_inst = FakeInst('inst1', 'id1')
        worker_inst = FakeInst('inst2', 'id2')

        # Master group has no overrides; worker group overrides one
        # HDFS key so node-group-level lookup can be verified.
        master_conf = {"MapReduce": {}, "HDFS": {}}
        worker_conf = {"MapReduce": {}, "HDFS": {"spam": "eggs"}}

        master_ng = FakeNG('master', 'fl1', ['namenode', 'jobtracker'], 1,
                           [master_inst], master_conf, 'id1')
        worker_ng = FakeNG('worker', 'fl1', ['datanode', 'tasktracker'], 1,
                           [worker_inst], worker_conf, 'id1')
        cluster = FakeCluster('cl1', 'ten1', 'vanilla', '1.2.1',
                              [master_ng, worker_ng])
        cond_get_cluster.return_value = cluster

        # Derived from the namenode instance hostname.
        self.assertEqual(
            'hdfs://inst1:8020',
            c_h.get_config_value('HDFS', 'fs.default.name', cluster))
        # Built from the node group's storage paths.
        self.assertEqual(
            '/mnt/lib/hadoop/hdfs/datanode',
            c_h.get_config_value('HDFS', 'dfs.data.dir', node_group=master_ng))
        # Node-group-level override wins.
        self.assertEqual(
            'eggs', c_h.get_config_value('HDFS', 'spam', node_group=worker_ng))
        # Falls back to the plugin default.
        self.assertEqual(
            30000, c_h.get_config_value('HDFS', 'dfs.safemode.extension'))
        # Unknown key without a default raises.
        self.assertRaises(RuntimeError,
                          c_h.get_config_value,
                          'MapReduce', 'spam', cluster, master_ng)