Example #1
    def create_cluster(self, cluster):
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

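        # resolve the user-supplied configs into a cluster spec for this
        # Hadoop version, then collect the hosts and the Ambari server info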
        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                version, cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

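        # wrap each host in a HadoopServer bound to its node group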
        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

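        # provision the cluster through Ambari across all servers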
        self._provision_cluster(
            cluster.name, cluster_spec, ambari_info, servers,
            cluster.hadoop_version)

        # add the topology data file and script if rack awareness is
        # enabled
        self._configure_topology_for_cluster(cluster, servers)

        LOG.info("Install of Hadoop stack successful.")
        # add service urls
        self._set_cluster_info(cluster, cluster_spec)
Example #2
    def configure_hdfs_ha(self, cluster):
        LOG.debug("Configuring HDFS HA")
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(version,
                                              cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

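        # the version-specific Ambari client performs the actual HDFS HA
        # setup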
        ambari_client = handler.get_ambari_client()
        ambari_client.setup_hdfs_ha(cluster_spec, servers, ambari_info,
                                    cluster.name)
        LOG.info(_LI("Configure HDFS HA successful."))
Example #3
    def scale_cluster(self, cluster, instances):
        handler = self.version_factory.get_version_handler(
            cluster.hadoop_version)
        ambari_client = handler.get_ambari_client()
        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                cluster.hadoop_version, cluster.cluster_configs))
        rpm = self._get_rpm_uri(cluster_spec)

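        # wrap only the newly added instances; existing hosts are already
        # provisioned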
        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(
                h.HadoopServer(instance,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec)
        self._update_ambari_info_credentials(cluster_spec, ambari_info)

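        # provision Ambari on each new server in its own thread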
        for server in servers:
            self._spawn('Ambari provisioning thread',
                        server.provision_ambari, ambari_info, cluster_spec)

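        # register the new hosts with Ambari, then bring their services up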
        ambari_client.configure_scaled_cluster_instances(
            cluster.name, cluster_spec, self._get_num_hosts(cluster),
            ambari_info)
        self._configure_topology_for_cluster(cluster, servers)
        ambari_client.start_scaled_cluster_instances(cluster.name,
                                                     cluster_spec, servers,
                                                     ambari_info)

        ambari_client.cleanup(ambari_info)
Example #4
    def create_cluster(self, cluster):
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(version,
                                              cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

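        # wrap each host in a HadoopServer bound to its node group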
        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

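        # provision the cluster through Ambari across all servers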
        self._provision_cluster(cluster.name, cluster_spec, ambari_info,
                                servers, cluster.hadoop_version)

        # add the topology data file and script if rack awareness is
        # enabled
        self._configure_topology_for_cluster(cluster, servers)

        LOG.info(_LI("Install of Hadoop stack successful."))
        # add service urls
        self._set_cluster_info(cluster, cluster_spec)

        # check if HDFS HA is enabled; set it up if so
        if cluster_spec.is_hdfs_ha_enabled(cluster):
            self.configure_hdfs_ha(cluster)
Example #5
    def scale_cluster(self, cluster, instances):
        handler = self.version_factory.get_version_handler(
            cluster.hadoop_version)
        ambari_client = handler.get_ambari_client()
        cluster_spec = handler.get_cluster_spec(
            cluster,
            self._map_to_user_inputs(cluster.hadoop_version,
                                     cluster.cluster_configs))
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(
                h.HadoopServer(instance,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec)
        self._update_ambari_info_credentials(cluster_spec, ambari_info)

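        # provision Ambari on each new server in its own thread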
        for server in servers:
            self._spawn('Ambari provisioning thread', server.provision_ambari,
                        ambari_info, cluster_spec)

        ambari_client.configure_scaled_cluster_instances(
            cluster.name, cluster_spec, self._get_num_hosts(cluster),
            ambari_info)
        self._configure_topology_for_cluster(cluster, servers)
        ambari_client.start_scaled_cluster_instances(cluster.name,
                                                     cluster_spec, servers,
                                                     ambari_info)

        ambari_client.cleanup(ambari_info)
Example #6
    def scale_cluster(self, cluster, instances):
        handler = self.version_factory.get_version_handler(
            cluster.hadoop_version)
        ambari_client = handler.get_ambari_client()
        cluster_spec = handler.get_cluster_spec(
            cluster,
            self._map_to_user_inputs(cluster.hadoop_version,
                                     cluster.cluster_configs))
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(
                h.HadoopServer(instance,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec)
        self._update_ambari_info_credentials(cluster_spec, ambari_info)

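        # record a provisioning step so per-server progress can be reported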
        cpo.add_provisioning_step(cluster.id,
                                  _("Provision cluster via Ambari"),
                                  len(servers))

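        # provision each server concurrently, tagging every thread with the
        # instance id it is working on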
        with context.ThreadGroup() as tg:
            for server in servers:
                with context.set_current_instance_id(
                        server.instance['instance_id']):
                    tg.spawn('Ambari provisioning thread',
                             server.provision_ambari, ambari_info,
                             cluster_spec)

        ambari_client.configure_scaled_cluster_instances(
            cluster.name, cluster_spec, self._get_num_hosts(cluster),
            ambari_info)
        self._configure_topology_for_cluster(cluster, servers)
        ambari_client.start_scaled_cluster_instances(cluster.name,
                                                     cluster_spec, servers,
                                                     ambari_info)

        ambari_client.cleanup(ambari_info)
Example #7
    def scale_cluster(self, cluster, instances):
        handler = self.version_factory.get_version_handler(
            cluster.hadoop_version)
        ambari_client = handler.get_ambari_client()
        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                cluster.hadoop_version, cluster.cluster_configs))
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(
                h.HadoopServer(instance,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec)
        self._update_ambari_info_credentials(cluster_spec, ambari_info)

        cpo.add_provisioning_step(
            cluster.id, _("Provision cluster via Ambari"), len(servers))

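        # spawn one provisioning thread per server inside a managed thread
        # group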
        with context.ThreadGroup() as tg:
            for server in servers:
                with context.set_current_instance_id(
                        server.instance['instance_id']):
                    tg.spawn('Ambari provisioning thread',
                             server.provision_ambari,
                             ambari_info, cluster_spec)

        ambari_client.configure_scaled_cluster_instances(
            cluster.name, cluster_spec, self._get_num_hosts(cluster),
            ambari_info)
        self._configure_topology_for_cluster(cluster, servers)
        ambari_client.start_scaled_cluster_instances(cluster.name,
                                                     cluster_spec, servers,
                                                     ambari_info)

        ambari_client.cleanup(ambari_info)
Example #8
    def configure_hdfs_ha(self, cluster):
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                version, cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

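        # the version-specific Ambari client performs the actual HDFS HA
        # setup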
        ambari_client = handler.get_ambari_client()
        ambari_client.setup_hdfs_ha(cluster_spec, servers, ambari_info,
                                    cluster.name)