Example #1
    def test_instances_with_services(self):
        inst = [FakeInstace(["1", "2", "3"]), FakeInstace(["1", "3"]),
                FakeInstace(["1"]), FakeInstace(["3"])]

        self.assertEqual(4, len(pu.instances_with_services(inst, ["1", "3"])))
        self.assertEqual(1, len(pu.instances_with_services(inst, ["2"])))
        self.assertEqual(3, len(pu.instances_with_services(inst, ["3"])))
        self.assertEqual(0, len(pu.instances_with_services(inst, ["5"])))
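
All of these examples exercise the same helper from the plugin utils module (imported as pu, utils, u, or plugin_utils below). As a reading aid, here is a minimal sketch of what instances_with_services plausibly does, assuming only what the snippets themselves rely on: each instance exposes node_group.node_processes, and an instance is kept when it runs at least one of the requested processes. This is a sketch, not the module's actual source, and the _NodeGroup/_Instance stand-ins are hypothetical, used only to make it runnable.

def instances_with_services(instances, node_processes):
    # Keep an instance if it runs at least one of the requested processes.
    wanted = set(node_processes)
    return [i for i in instances
            if wanted & set(i.node_group.node_processes)]


# Hypothetical stand-ins, just to show the call shape:
class _NodeGroup(object):
    def __init__(self, procs):
        self.node_processes = procs


class _Instance(object):
    def __init__(self, procs):
        self.node_group = _NodeGroup(procs)


inst = [_Instance(["1", "2", "3"]), _Instance(["1", "3"]),
        _Instance(["1"]), _Instance(["3"])]
assert len(instances_with_services(inst, ["1", "3"])) == 4
assert len(instances_with_services(inst, ["5"])) == 0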
Example #2
    def test_instances_with_services(self):
        inst = [FakeInstance("1", ["nodeprocess1"]),
                FakeInstance("2", ["nodeprocess2"])]

        node_processes = ["nodeprocess"]
        res = pu.instances_with_services(inst, node_processes)
        self.assertEqual([], res)

        node_processes = ["nodeprocess1"]
        res = pu.instances_with_services(inst, node_processes)
        self.assertEqual([FakeInstance("1", ["nodeprocess1"])], res)
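
For Example #2's assertions to pass, the fake has to define value equality (assertEqual compares the result list against freshly built FakeInstance objects) as well as expose node_group.node_processes. A hypothetical reconstruction consistent with both requirements:

class FakeInstance(object):
    # Hypothetical test double; the real definition lives in the test
    # module alongside Example #2.
    def __init__(self, instance_id, node_processes):
        self.instance_id = instance_id
        self.node_processes = node_processes

    @property
    def node_group(self):
        # instances_with_services reads node_group.node_processes,
        # so the fake answers for its own node group.
        return self

    def __eq__(self, other):
        return (self.instance_id == other.instance_id
                and self.node_processes == other.node_processes)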
Example #3
def deploy_kerberos_principals(cluster, instances=None):
    if not kerberos.is_kerberos_security_enabled(cluster):
        return
    if instances is None:
        instances = plugin_utils.get_instances(cluster)
    mapper = {
        'hdfs': plugin_utils.instances_with_services(
            instances, [p_common.SECONDARY_NAMENODE, p_common.NAMENODE,
                        p_common.DATANODE, p_common.JOURNAL_NODE]),
        'spark': plugin_utils.instances_with_services(
            instances, [p_common.SPARK_JOBHISTORYSERVER]),
        'oozie': plugin_utils.instances_with_services(
            instances, [p_common.OOZIE_SERVER]),
    }

    kerberos.create_keytabs_for_map(cluster, mapper)
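
The mapper groups instances by the Kerberos service whose credentials they need: every HDFS daemon host under 'hdfs', the Spark history-server host under 'spark', and the Oozie server host under 'oozie'. kerberos.create_keytabs_for_map then, as its name suggests, creates principals and keytabs for exactly those hosts, leaving nodes that run none of the mapped processes untouched.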
Example #4
    def _start_tt_dn_processes(self, instances):
        tt_dn_names = ["datanode", "tasktracker"]

        instances = utils.instances_with_services(instances, tt_dn_names)

        if not instances:
            return

        cpo.add_provisioning_step(
            instances[0].cluster_id,
            utils.start_process_event_message("DataNodes, TaskTrackers"),
            len(instances))

        with context.ThreadGroup() as tg:
            for i in instances:
                processes = set(i.node_group.node_processes)
                tt_dn_procs = processes.intersection(tt_dn_names)
                tg.spawn("vanilla-start-tt-dn-%s" % i.instance_name, self._start_tt_dn, i, list(tt_dn_procs))
Example #5
def start_dn_nm_processes(instances):
    filternames = ['datanode', 'nodemanager']
    instances = pu.instances_with_services(instances, filternames)

    if len(instances) == 0:
        return

    cpo.add_provisioning_step(
        instances[0].cluster_id,
        pu.start_process_event_message("DataNodes, NodeManagers"),
        len(instances))

    with context.ThreadGroup() as tg:
        for instance in instances:
            processes = set(instance.node_group.node_processes)
            processes = processes.intersection(filternames)
            tg.spawn('vanilla-start-processes-%s' % instance.instance_name,
                     _start_processes, instance, list(processes))
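
Note the two-stage filtering in Examples #4 and #5: instances_with_services first drops every node that runs none of the target processes, and the per-instance intersection then trims the process list, so a node that runs only a datanode is never asked to start a nodemanager (or tasktracker).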
Example #6
def start_dn_nm_processes(instances):
    filternames = ['datanode', 'nodemanager']
    instances = pu.instances_with_services(instances, filternames)

    if len(instances) == 0:
        return

    cpo.add_provisioning_step(
        instances[0].cluster_id,
        pu.start_process_event_message("DataNodes, NodeManagers"),
        len(instances))

    with context.ThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
                processes = set(instance.node_group.node_processes)
                processes = processes.intersection(filternames)
                tg.spawn('vanilla-start-processes-%s' % instance.instance_name,
                         _start_processes, instance, list(processes))
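
Example #6 differs from Example #5 only in wrapping the per-instance work in context.set_current_instance_id, which presumably attributes the events and log records emitted during startup to the instance being processed.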
Example #7
    def get_hdfs_nodes(self, cluster, instances=None):
        instances = instances if instances else u.get_instances(cluster)
        return u.instances_with_services(
            instances, ["HDFS_DATANODE", "HDFS_NAMENODE",
                        "HDFS_SECONDARYNAMENODE"])