Example #1
    def test_get_secondarynamenodes(self):
        cl = tu.create_cluster(
            'cl1', 't1', 'vanilla', '1.2.1',
            [self.ng_manager, self.ng_namenode, self.ng_secondarynamenode])
        self.assertEqual('snn1', u.get_secondarynamenodes(cl)[0].instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertEqual([], u.get_secondarynamenodes(cl))
Example #2
    def test_get_secondarynamenodes(self):
        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager, self.ng_namenode,
                                self.ng_secondarynamenode])
        self.assertEqual('snn1', u.get_secondarynamenodes(cl)[0].instance_id)

        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
                               [self.ng_manager])
        self.assertEqual([], u.get_secondarynamenodes(cl))
Example #3
    def start_secondarynamenodes(self, cluster):
        snns = vu.get_secondarynamenodes(cluster)
        if len(snns) == 0:
            return
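        # register a single provisioning step that covers all secondary
        # namenodes, so progress is reported once per started instance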
        cpo.add_provisioning_step(
            snns[0].cluster_id,
            utils.start_process_event_message("SecondaryNameNodes"), len(snns))

        for snn in vu.get_secondarynamenodes(cluster):
            self._start_secondarynamenode(snn)
Example #4
    def start_secondarynamenodes(self, cluster):
        snns = vu.get_secondarynamenodes(cluster)
        if len(snns) == 0:
            return
        cpo.add_provisioning_step(
            snns[0].cluster_id, utils.start_process_event_message(
                "SecondaryNameNodes"), len(snns))

        for snn in vu.get_secondarynamenodes(cluster):
            self._start_secondarynamenode(snn)
Example #5
    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, 'secondarynamenode')

        rm = vu.get_resourcemanager(cluster)
        if rm:
            run.start_yarn_process(rm, 'resourcemanager')

        run.start_dn_nm_processes(utils.get_instances(cluster))

        run.await_datanodes(cluster)

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(self.pctx, oo)

        hiveserver = vu.get_hiveserver(cluster)
        if hiveserver:
            run.start_hiveserver_process(self.pctx, hiveserver)

        self._set_cluster_info(cluster)
Example #6
    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, 'secondarynamenode')

        rm = vu.get_resourcemanager(cluster)
        if rm:
            run.start_yarn_process(rm, 'resourcemanager')

        run.start_all_processes(utils.get_instances(cluster),
                                ['datanode', 'nodemanager'])

        run.await_datanodes(cluster)

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(self.pctx, oo)

        hiveserver = vu.get_hiveserver(cluster)
        if hiveserver:
            run.start_hiveserver_process(self.pctx, hiveserver)

        self._set_cluster_info(cluster)
Example #7
    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, "namenode")

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, "secondarynamenode")

        rm = vu.get_resourcemanager(cluster)
        if rm:
            run.start_yarn_process(rm, "resourcemanager")

        for dn in vu.get_datanodes(cluster):
            run.start_hadoop_process(dn, "datanode")

        run.await_datanodes(cluster)

        for nm in vu.get_nodemanagers(cluster):
            run.start_yarn_process(nm, "nodemanager")

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(oo)

        self._set_cluster_info(cluster)
Example #8
    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, 'secondarynamenode')

        rm = vu.get_resourcemanager(cluster)
        run.start_yarn_process(rm, 'resourcemanager')

        for dn in vu.get_datanodes(cluster):
            run.start_hadoop_process(dn, 'datanode')

        run.await_datanodes(cluster)

        for nm in vu.get_nodemanagers(cluster):
            run.start_yarn_process(nm, 'nodemanager')

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(oo)

        self._set_cluster_info(cluster)
Example #9
    def start_cluster(self, cluster):
        nn_instance = vu.get_namenode(cluster)
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_processes(remote.get_remote(snn), "secondarynamenode")

        jt_instance = vu.get_jobtracker(cluster)
        if jt_instance:
            run.start_processes(remote.get_remote(jt_instance), "jobtracker")

        self._start_tt_dn_processes(utils.get_instances(cluster))

        self._await_datanodes(cluster)

        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                 cluster.name)

        oozie = vu.get_oozie(cluster)
        if oozie:
            with remote.get_remote(oozie) as r:
                if c_helper.is_mysql_enable(cluster):
                    run.mysql_start(r, oozie)
                    run.oozie_create_db(r)
                run.oozie_share_lib(r, nn_instance.hostname())
                run.start_oozie(r)
                LOG.info(_LI("Oozie service at '%s' has been started"),
                         nn_instance.hostname())

        hive_server = vu.get_hiveserver(cluster)
        if hive_server:
            with remote.get_remote(hive_server) as r:
                run.hive_create_warehouse_dir(r)
                run.hive_copy_shared_conf(
                    r, edp.get_hive_shared_conf_path('hadoop'))

                if c_helper.is_mysql_enable(cluster):
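                    # MySQL has already been started on the Oozie node above;
                    # start it here only if Hive runs on a different instance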
                    if not oozie or hive_server.hostname() != oozie.hostname():
                        run.mysql_start(r, hive_server)
                    run.hive_create_db(r)
                    run.hive_metastore_start(r)
                    LOG.info(_LI("Hive Metastore server at %s has been "
                                 "started"),
                             hive_server.hostname())

        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
        self._set_cluster_info(cluster)
Example #10
    def start_cluster(self, cluster):
        nn_instance = vu.get_namenode(cluster)
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_processes(remote.get_remote(snn), "secondarynamenode")

        jt_instance = vu.get_jobtracker(cluster)
        if jt_instance:
            run.start_processes(remote.get_remote(jt_instance), "jobtracker")

        self._start_tt_dn_processes(utils.get_instances(cluster))

        self._await_datanodes(cluster)

        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                 cluster.name)

        oozie = vu.get_oozie(cluster)
        if oozie:
            with remote.get_remote(oozie) as r:
                if c_helper.is_mysql_enable(cluster):
                    run.mysql_start(r, oozie)
                    run.oozie_create_db(r)
                run.oozie_share_lib(r, nn_instance.hostname())
                run.start_oozie(r)
                LOG.info(_LI("Oozie service at '%s' has been started"),
                         nn_instance.hostname())

        hive_server = vu.get_hiveserver(cluster)
        if hive_server:
            with remote.get_remote(hive_server) as r:
                run.hive_create_warehouse_dir(r)
                run.hive_copy_shared_conf(
                    r, edp.get_hive_shared_conf_path('hadoop'))

                if c_helper.is_mysql_enable(cluster):
                    if not oozie or hive_server.hostname() != oozie.hostname():
                        run.mysql_start(r, hive_server)
                    run.hive_create_db(r, cluster.extra['hive_mysql_passwd'])
                    run.hive_metastore_start(r)
                    LOG.info(
                        _LI("Hive Metastore server at %s has been "
                            "started"), hive_server.hostname())

        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
        self._set_cluster_info(cluster)
Example #11
    def start_secondarynamenodes(self, cluster):
        if len(vu.get_secondarynamenodes(cluster)) == 0:
            return

        for snn in vu.get_secondarynamenodes(cluster):
            self._start_secondarynamenode(snn)
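Every example above relies on vu.get_secondarynamenodes, which is not shown in these snippets. A minimal sketch of such a helper, assuming the cluster object exposes node_groups whose node_processes list the daemons they run and whose instances hold the provisioned nodes (these attribute names are assumptions, not taken from the snippets), could look like this:

    def get_instances(cluster, node_process=None):
        # collect every instance in the cluster, optionally keeping only those
        # whose node group is configured to run the given process
        instances = []
        for node_group in cluster.node_groups:
            if node_process is None or node_process in node_group.node_processes:
                instances.extend(node_group.instances)
        return instances

    def get_secondarynamenodes(cluster):
        # all instances that run the "secondarynamenode" process; an empty
        # list means the cluster was built without a secondary namenode
        return get_instances(cluster, "secondarynamenode")

With a helper along these lines, the call in Example #1 would return the single 'snn1' instance for the cluster that includes the secondary namenode node group, and an empty list for the cluster built from the manager node group alone.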