Example #1
    def scale_cluster(self, cluster, instances):
        processor = self._get_blueprint_processor(cluster)
        cluster_spec = clusterspec.ClusterSpec(
            json.dumps(processor.blueprint), cluster=cluster)
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(h.HadoopServer(
                instance, cluster_spec.node_groups[host_role],
                ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec)
        self._update_ambari_info_credentials(cluster_spec, ambari_info)

        for server in servers:
            self._spawn('Ambari provisioning thread',
                        server.provision_ambari, ambari_info)

        self._wait_for_host_registrations(self._get_num_hosts(cluster),
                                          ambari_info)

        # now add the hosts and their components
        self._add_hosts_and_components(cluster_spec, servers,
                                       ambari_info, cluster.name)

        self._install_and_start_components(cluster.name, servers, ambari_info)
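
Every example on this page resolves an instance's role through utils.get_host_role before constructing a HadoopServer. For reference, a minimal sketch of what such a helper might look like, assuming each instance exposes either a role attribute or a node_group object with a name (both attribute names are assumptions, not taken from the source):

    def get_host_role(host):
        # Assumed shape: an explicit 'role' attribute wins; otherwise fall
        # back to the name of the node group the host belongs to.
        if hasattr(host, 'role'):
            return host.role
        return host.node_group.name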
Example #2
    def create_cluster(self, cluster):
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                version, cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        self._provision_cluster(
            cluster.name, cluster_spec, ambari_info, servers,
            cluster.hadoop_version)

        LOG.info("Install of Hadoop stack successful.")
        # add service urls
        self._set_cluster_info(cluster, cluster_spec)
Example #3
    def create_cluster(self, cluster):
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                version, cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        self._provision_cluster(
            cluster.name, cluster_spec, ambari_info, servers,
            cluster.hadoop_version)

        # add the topology data file and script if rack awareness is
        # enabled
        self._configure_topology_for_cluster(cluster, servers)

        LOG.info("Install of Hadoop stack successful.")
        # add service urls
        self._set_cluster_info(cluster, cluster_spec)
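
Examples 2 and 3 route all version-specific behavior through a version handler factory. A minimal sketch of that registry pattern, with hypothetical class and method names (only get_version_handler appears in the examples themselves):

    class VersionHandlerFactory(object):
        # Hypothetical registry mapping a Hadoop version string to the
        # handler that builds cluster specs and Ambari clients for it.
        _handlers = {}

        @classmethod
        def register(cls, version, handler_cls):
            cls._handlers[version] = handler_cls()

        def get_version_handler(self, version):
            try:
                return self._handlers[version]
            except KeyError:
                raise ValueError("Unsupported Hadoop version: %s" % version)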
Example #4
    def scale_cluster(self, cluster, instances):
        handler = self.version_factory.get_version_handler(
            cluster.hadoop_version)
        ambari_client = handler.get_ambari_client()
        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                cluster.hadoop_version, cluster.cluster_configs))
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(h.HadoopServer(
                instance, cluster_spec.node_groups[host_role],
                ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec)
        self._update_ambari_info_credentials(cluster_spec, ambari_info)

        for server in servers:
            self._spawn('Ambari provisioning thread',
                        server.provision_ambari, ambari_info)

        ambari_client.scale_cluster(cluster.name, cluster_spec, servers,
                                    self._get_num_hosts(cluster), ambari_info)

        ambari_client.cleanup(ambari_info)
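
The _spawn calls in the scaling examples start provisioning in the background; the later wait or scale_cluster call is what synchronizes on host registration. A minimal sketch of such a helper using plain threads (shown as a free function; Sahara itself may use green threads, so the form and names here are assumptions):

    import threading

    def spawn(description, func, *args, **kwargs):
        # Run func asynchronously; naming the thread after the task makes
        # provisioning failures easier to trace in logs.
        thread = threading.Thread(name=description, target=func,
                                  args=args, kwargs=kwargs)
        thread.daemon = True
        thread.start()
        return thread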
Example #5
    def create_cluster(self, cluster, cluster_template):

        if cluster_template is None:
            raise ValueError("must supply cluster template")

        cluster_spec = clusterspec.ClusterSpec(cluster_template,
                                               cluster=cluster)

        hosts = self._get_servers(cluster)

        ambari_host = self._determine_host_for_server_component(
            "AMBARI_SERVER", cluster_spec, hosts)
        self.cluster_name_to_ambari_host_mapping[cluster.name] = ambari_host
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        provisioned = self._provision_cluster(cluster.name, cluster_spec,
                                              ambari_host, servers)
        if provisioned:
            installed = self._install_services(cluster.name, ambari_host)
            if installed:
                LOG.info("Install of Hadoop stack successful.")
                # add service urls
                self._set_cluster_info(cluster, cluster_spec, hosts)
            else:
                raise ex.HadoopProvisionError("Installation of Hadoop stack failed.")

        else:
            raise ex.HadoopProvisionError("Provisioning of Hadoop cluster failed.")
Example #6
    def scale_cluster(self, cluster, instances):
        handler = self.version_factory.get_version_handler(
            cluster.hadoop_version)
        ambari_client = handler.get_ambari_client()
        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                cluster.hadoop_version, cluster.cluster_configs))
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(h.HadoopServer(
                instance, cluster_spec.node_groups[host_role],
                ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec)
        self._update_ambari_info_credentials(cluster_spec, ambari_info)

        for server in servers:
            self._spawn('Ambari provisioning thread',
                        server.provision_ambari, ambari_info, cluster_spec)

        ambari_client.configure_scaled_cluster_instances(
            cluster.name, cluster_spec, self._get_num_hosts(cluster),
            ambari_info)
        self._configure_topology_for_cluster(cluster, servers)
        ambari_client.start_scaled_cluster_instances(cluster.name,
                                                     cluster_spec, servers,
                                                     ambari_info)

        ambari_client.cleanup(ambari_info)
Example #7
    def scale_cluster(self, cluster, instances):
        processor = self._get_blueprint_processor(cluster)
        cluster_spec = clusterspec.ClusterSpec(
            json.dumps(processor.blueprint), cluster=cluster)
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for instance in instances:
            host_role = utils.get_host_role(instance)
            servers.append(h.HadoopServer(
                instance, cluster_spec.node_groups[host_role],
                ambari_rpm=rpm))

        ambari_info = self.get_ambari_info(cluster_spec,
                                           self._get_servers(cluster))

        for server in servers:
            self._spawn('Ambari provisioning thread',
                        server.provision_ambari, ambari_info)

        self._wait_for_host_registrations(self._get_num_hosts(cluster),
                                          ambari_info)

        # now add the hosts and their components
        self._add_hosts_and_components(cluster_spec, servers,
                                       ambari_info, cluster.name)

        self._install_and_start_components(cluster.name, servers, ambari_info)
Example #8
    def _get_ambari_host(self, servers):
        # Iterate through the servers and find the one running the Ambari
        # server; fall back to the MASTER role if none declares it.
        host = next((server for server in servers
                     if utils.get_node_processes(server) is not None
                     and 'AMBARI_SERVER' in utils.get_node_processes(server)),
                    None)
        if host is None:
            host = next((server for server in servers
                         if utils.get_host_role(server) == 'MASTER'), None)
        return host
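
Example 8 filters on utils.get_node_processes, which may return None for instances that run no managed processes. A hedged sketch of that helper under the same assumed node-group shape as above:

    def get_node_processes(server):
        # Assumed shape: the node group records which processes it runs;
        # None is a legitimate value for bare instances.
        node_group = getattr(server, 'node_group', None)
        return getattr(node_group, 'node_processes', None)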
Example #9
    def _determine_host_for_server_component(self, component, cluster_spec,
                                             servers):
        found_node_group = None
        node_groups = cluster_spec.node_groups
        for node_group in node_groups.values():
            if component in node_group.components:
                found_node_group = node_group.name

        for host in servers:
            if utils.get_host_role(host) == found_node_group:
                return host

        raise Exception("Server component [{0}] not specified in configuration".format(component))
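
Example 9 first scans cluster_spec.node_groups for the group that declares the component, then matches servers by role. To make that lookup concrete, a hypothetical minimal shape for the node-group objects (attribute names assumed):

    class NodeGroup(object):
        def __init__(self, name, components):
            self.name = name              # matches utils.get_host_role(host)
            self.components = components  # e.g. ['AMBARI_SERVER', 'NAMENODE']

    node_groups = {
        'master': NodeGroup('master', ['AMBARI_SERVER', 'NAMENODE']),
        'worker': NodeGroup('worker', ['DATANODE', 'TASKTRACKER']),
    }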
Example #10
    def _get_ambari_host(self, servers):
        # Iterate through the servers and find the master server.
        host = next(
            (
                server
                for server in servers
                if utils.get_node_processes(server) is not None
                and "AMBARI_SERVER" in utils.get_node_processes(server)
            ),
            None,
        )
        if host is None:
            host = next(
                (server for server in servers
                 if utils.get_host_role(server) == "MASTER"),
                None,
            )
        return host
Example #11
    def _determine_host_for_server_component(self, component, cluster_spec,
                                             servers):
        found_node_group = None
        node_groups = cluster_spec.node_groups
        for node_group in node_groups.values():
            if component in node_group.components:
                found_node_group = node_group.name

        for host in servers:
            if utils.get_host_role(host) == found_node_group:
                return host

        raise Exception(
            'Server component [{0}] not specified in configuration'.format(
                component))
Example #12
    def _generate_host_manifest(self):
        host_manifest = {}
        hosts = []
        host_id = 1

        for server in self.servers:
            instance_info = n_helper.get_instance_info(server)
            hosts.append({'host_id': host_id,
                          'hostname': server.hostname,
                          'role': utils.get_host_role(server),
                          'vm_image': instance_info.image,
                          'vm_flavor': instance_info.flavor,
                          'public_ip': server.management_ip,
                          'private_ip': server.internal_ip})
            host_id += 1

        host_manifest['hosts'] = hosts
        return json.dumps(host_manifest).strip('{}')
Example #13
    def _generate_host_manifest(self, servers):
        host_manifest = {}
        hosts = []
        host_id = 1

        for server in servers:
            instance_info = n_helper.get_instance_info(server)
            hosts.append({
                'host_id': host_id,
                'hostname': server.hostname,
                'role': utils.get_host_role(server),
                'vm_image': instance_info.image,
                'vm_flavor': instance_info.flavor,
                'public_ip': server.management_ip,
                'private_ip': server.internal_ip
            })
            host_id += 1

        host_manifest['hosts'] = hosts
        return json.dumps(host_manifest).strip('{}')
Example #14
    def _generate_host_manifest(self, servers):
        host_manifest = {}
        hosts = []
        host_id = 1

        for server in servers:
            instance_info = n_helper.get_instance_info(server)
            hosts.append(
                {
                    "host_id": host_id,
                    "hostname": server.hostname,
                    "role": utils.get_host_role(server),
                    "vm_image": instance_info.image,
                    "vm_flavor": instance_info.flavor,
                    "public_ip": server.management_ip,
                    "private_ip": server.internal_ip,
                }
            )
            host_id += 1

        host_manifest["hosts"] = hosts
        return json.dumps(host_manifest).strip("{}")
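
The trailing strip("{}") in the manifest examples removes the outer braces so the result can be spliced into a larger JSON document. A quick illustration with hypothetical values:

    import json

    fragment = json.dumps({"hosts": [{"host_id": 1}]}).strip("{}")
    # fragment == '"hosts": [{"host_id": 1}]' -- strip() removes only
    # leading and trailing brace characters, so inner objects survive.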
Example #15
    def create_cluster(self, cluster, cluster_template):

        if cluster_template is None:
            raise ValueError('must supply cluster template')

        cluster_spec = clusterspec.ClusterSpec(cluster_template,
                                               cluster=cluster)

        hosts = self._get_servers(cluster)

        ambari_host = self._determine_host_for_server_component(
            'AMBARI_SERVER', cluster_spec, hosts)
        self.cluster_name_to_ambari_host_mapping[cluster.name] = ambari_host
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host,
                               cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        provisioned = self._provision_cluster(cluster.name, cluster_spec,
                                              ambari_host, servers)
        if provisioned:
            installed = self._install_services(cluster.name, ambari_host)
            if installed:
                LOG.info("Install of Hadoop stack successful.")
                # add service urls
                self._set_cluster_info(cluster, cluster_spec, hosts)
            else:
                raise ex.HadoopProvisionError(
                    'Installation of Hadoop stack failed.')

        else:
            raise ex.HadoopProvisionError(
                'Provisioning of Hadoop cluster failed.')
Example #16
    def create_cluster(self, cluster, cluster_template):

        if cluster_template is None:
            raise ValueError('must supply cluster template')

        cluster_spec = clusterspec.ClusterSpec(
            cluster_template, cluster=cluster)

        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        provisioned = self._provision_cluster(
            cluster.name, cluster_spec, ambari_info, servers)

        if provisioned:
            installed = self._install_services(cluster.name, ambari_info)
            if installed:
                LOG.info("Install of Hadoop stack successful.")
                # add service urls
                self._set_cluster_info(cluster, cluster_spec, ambari_info)
            else:
                raise ex.HadoopProvisionError(
                    'Installation of Hadoop stack failed.')

        else:
            raise ex.HadoopProvisionError(
                'Provisioning of Hadoop cluster failed.')