Example #1
    def start_cluster(self, cluster):
        self._set_cluster_info(cluster)
        deploy.start_cluster(cluster)
        cluster_instances = plugin_utils.get_instances(cluster)
        swift_helper.install_ssl_certs(cluster_instances)
        deploy.add_hadoop_swift_jar(cluster_instances)
        deploy.prepare_hive(cluster)
        deploy.deploy_kerberos_principals(cluster)
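
This pattern (and the examples that follow) comes from OpenStack Sahara's provisioning plugins: once the cluster services are started, every instance is fetched and the Swift SSL certificates (plus, for Hadoop clusters, the Hadoop-Swift JAR) are pushed to all of them. A minimal stand-in sketch of that post-start step; the callables passed in here are hypothetical placeholders for the real Sahara helpers named in the snippet:

def enable_swift(cluster, get_instances, install_certs, add_jar):
    # fetch every instance, then push certs and the Swift JAR to all of them
    instances = get_instances(cluster)
    install_certs(instances)   # stands in for swift_helper.install_ssl_certs
    add_jar(instances)         # stands in for deploy.add_hadoop_swift_jar

enable_swift('demo-cluster',
             get_instances=lambda c: ['node-1', 'node-2'],
             install_certs=lambda ins: print('certs ->', ins),
             add_jar=lambda ins: print('jar ->', ins))
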
Example #2
    def configure_swift(self, cluster, instances=None):
        if self.c_helper.is_swift_enabled(cluster):
            if not instances:
                instances = u.get_instances(cluster)
            u.add_provisioning_step(
                cluster.id, _("Configure Swift"), len(instances))

            with context.PluginsThreadGroup() as tg:
                for i in instances:
                    tg.spawn('cdh-swift-conf-%s' % i.instance_name,
                             self._configure_swift_to_inst, i)
            swift_helper.install_ssl_certs(instances)
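
The thread-group block above fans the per-instance configuration out in parallel, one spawned task per instance. A minimal sketch of the same fan-out using the standard library instead of Sahara's context.PluginsThreadGroup; the instance names and the per-instance worker below are hypothetical:

from concurrent.futures import ThreadPoolExecutor

def configure_swift_to_inst(instance_name):
    # placeholder for the real per-instance Swift configuration
    print('configuring swift on %s' % instance_name)

instances = ['node-1', 'node-2', 'node-3']
with ThreadPoolExecutor() as pool:
    futures = [pool.submit(configure_swift_to_inst, i) for i in instances]
    for f in futures:
        f.result()  # surface any per-instance failure, as the group would
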
Example #3
def scale_cluster(pctx, cluster, instances):
    config.configure_instances(pctx, instances)
    _update_include_files(cluster)
    run.refresh_hadoop_nodes(cluster)
    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.refresh_yarn_nodes(cluster)

    config.configure_topology_data(pctx, cluster)
    run.start_dn_nm_processes(instances)
    swift_helper.install_ssl_certs(instances)
    config.configure_zookeeper(cluster)
    run.refresh_zk_servers(cluster)
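
Note the guard around refresh_yarn_nodes: the ResourceManager is optional, so the refresh runs only when vu.get_resourcemanager finds one. A tiny sketch of that optional-component guard with dict-backed stand-ins (the role lookup and refresh callable here are hypothetical):

def refresh_if_present(cluster, role, refresh):
    instance = cluster.get(role)  # None when no node has this role
    if instance:
        refresh(instance)

cluster = {'resourcemanager': 'rm-node-1'}
refresh_if_present(cluster, 'resourcemanager',
                   lambda inst: print('refreshing YARN on', inst))
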
Example #4
    def scale_cluster(self, cluster, instances):
        deploy.prepare_kerberos(cluster, instances)
        deploy.setup_agents(cluster, instances)
        cluster = conductor.cluster_get(context.ctx(), cluster.id)
        deploy.wait_host_registration(cluster, instances)
        deploy.resolve_package_conflicts(cluster, instances)
        deploy.add_new_hosts(cluster, instances)
        deploy.manage_config_groups(cluster, instances)
        deploy.manage_host_components(cluster, instances)
        deploy.configure_rack_awareness(cluster, instances)
        swift_helper.install_ssl_certs(instances)
        deploy.add_hadoop_swift_jar(instances)
        deploy.deploy_kerberos_principals(cluster, instances)
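
One detail worth copying: after setup_agents, the snippet re-reads the cluster via conductor.cluster_get so the remaining steps see any fields the agent setup changed. A minimal sketch of that re-fetch pattern, with a dict-backed store standing in for the database (the store and the field name are hypothetical):

store = {'c1': {'id': 'c1', 'registered_hosts': 0}}

def cluster_get(cluster_id):
    return dict(store[cluster_id])  # fresh copy, like a fresh DB read

cluster = cluster_get('c1')
store['c1']['registered_hosts'] = 3  # side effect of an earlier step
cluster = cluster_get('c1')          # re-fetch to pick up the change
assert cluster['registered_hosts'] == 3
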
Example #5
    def scale_cluster(self, cluster, instances):
        master = utils.get_instance(cluster, "master")
        r_master = utils.get_remote(master)

        run.stop_spark(r_master, self._spark_home(cluster))

        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
        run.refresh_nodes(utils.get_remote(nn), "dfsadmin")
        dn_instances = [instance for instance in instances if
                        'datanode' in instance.node_group.node_processes]
        self._start_datanode_processes(dn_instances)

        swift_helper.install_ssl_certs(instances)
        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info("Spark master service has been restarted")
Example #6
    def start_cluster(self, cluster):
        keypairs.provision_keypairs(cluster)

        s_scripts.start_namenode(cluster)
        s_scripts.start_secondarynamenode(cluster)
        s_scripts.start_resourcemanager(cluster)

        run.start_dn_nm_processes(utils.get_instances(cluster))
        run.await_datanodes(cluster)

        s_scripts.start_historyserver(cluster)
        s_scripts.start_oozie(self.pctx, cluster)
        s_scripts.start_hiveserver(self.pctx, cluster)
        s_scripts.start_zookeeper(cluster)

        swift_helper.install_ssl_certs(utils.get_instances(cluster))

        self._set_cluster_info(cluster)
        s_scripts.start_spark(cluster)
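
The ordering here matters: the HDFS and YARN masters come up first, the worker processes next, and await_datanodes blocks until HDFS is usable before anything that writes to it (history server, Oozie, Hive) is started; the Swift certs go out only once every instance is running. A plain-data sketch of that sequence, with the rationale column reflecting a reading of the snippet rather than documented guarantees:

START_ORDER = [
    ('start_namenode', 'HDFS master first'),
    ('start_secondarynamenode', 'HDFS checkpointing'),
    ('start_resourcemanager', 'YARN master'),
    ('start_dn_nm_processes', 'workers on every instance'),
    ('await_datanodes', 'block until HDFS has live datanodes'),
    ('start_historyserver', 'stores its data in HDFS'),
    ('start_oozie', 'needs HDFS up'),
    ('start_hiveserver', 'needs HDFS up'),
    ('start_zookeeper', 'coordination service'),
    ('install_ssl_certs', 'Swift TLS trust on all instances'),
    ('start_spark', 'last, after core Hadoop is healthy'),
]
for step, why in START_ORDER:
    print('%-25s # %s' % (step, why))
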
Example #7
    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        dn_instances = utils.get_instances(cluster, "datanode")

        # Start the name node
        self._start_namenode(nn_instance)

        # Start the data nodes
        self._start_datanode_processes(dn_instances)
        run.await_datanodes(cluster)

        LOG.info("Hadoop services have been started")

        with utils.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        self.start_spark(cluster)
        swift_helper.install_ssl_certs(utils.get_instances(cluster))

        LOG.info('Cluster has been started successfully')
        self._set_cluster_info(cluster)
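
The remote block above provisions the HDFS home directory for the current user before Spark jobs run. A stand-in sketch that issues the same commands over plain ssh via the standard library, instead of Sahara's remote abstraction; the host name is hypothetical:

import subprocess

def execute_command(host, cmd):
    # mirrors r.execute_command(...) from the snippet, but over plain ssh
    subprocess.run(['ssh', host, cmd], check=True)

host = 'namenode-1'
execute_command(host, 'sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/')
execute_command(host, 'sudo -u hdfs hdfs dfs -chown $USER /user/$USER/')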