Code example #1
    def configure_os(self, instances):
        # instances non-empty
        u.add_provisioning_step(
            instances[0].cluster_id, _("Configure OS"), len(instances))
        with context.PluginsThreadGroup() as tg:
            for inst in instances:
                tg.spawn('cdh-repo-conf-%s' % inst.instance_name,
                         self._configure_repo_from_inst, inst)
Code example #2
def disable_repos(cluster):
    if configs.use_base_repos_needed(cluster):
        LOG.debug("Using base repos")
        return
    instances = plugin_utils.get_instances(cluster)
    with context.PluginsThreadGroup() as tg:
        for inst in instances:
            tg.spawn("disable-repos-%s" % inst.instance_name,
                     _disable_repos_on_inst, inst)
Code example #3
def _setup_agents(instances, manager_address):
    plugin_utils.add_provisioning_step(instances[0].cluster.id,
                                       _("Set up Ambari agents"),
                                       len(instances))
    with context.PluginsThreadGroup() as tg:
        for inst in instances:
            tg.spawn("hwx-agent-setup-%s" % inst.id, _setup_agent, inst,
                     manager_address)
    LOG.debug("Ambari agents have been installed")
Code example #4
    def install_packages(self, instances, packages):
        # instances non-empty
        u.add_provisioning_step(
            instances[0].cluster_id, _("Install packages"), len(instances))

        with context.PluginsThreadGroup() as tg:
            for i in instances:
                tg.spawn('cdh-inst-pkgs-%s' % i.instance_name,
                         self._install_pkgs, i, packages)
Code example #5
    def update_configs(self, instances):
        # instances non-empty
        utils.add_provisioning_step(
            instances[0].cluster_id, _("Update configs"), len(instances))
        with context.PluginsThreadGroup() as tg:
            for instance in instances:
                tg.spawn("update-configs-%s" % instance.instance_name,
                         self._update_configs, instance)
                # brief pause between successive spawns
                context.sleep(1)
Code example #6
def start_zk_server(instances):
    utils.add_provisioning_step(instances[0].cluster_id,
                                utils.start_process_event_message("ZooKeeper"),
                                len(instances))

    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
                tg.spawn('ZK-start-processes-%s' % instance.instance_name,
                         _start_zk_processes, instance, 'start')
Code example #7
    def start_cloudera_agents(self, instances):
        # instances non-empty
        u.add_provisioning_step(
            instances[0].cluster_id, _("Start Cloudera Agents"),
            len(instances))

        with context.PluginsThreadGroup() as tg:
            for i in instances:
                tg.spawn('cdh-agent-start-%s' % i.instance_name,
                         self._start_cloudera_agent, i)
Code example #8
    def _start_slave_processes(self, sl_instances):
        if len(sl_instances) == 0:
            return
        utils.add_provisioning_step(sl_instances[0].cluster_id,
                                    utils.start_process_event_message("Slave"),
                                    len(sl_instances))

        with context.PluginsThreadGroup() as tg:
            for i in sl_instances:
                tg.spawn('storm-start-sl-%s' % i.instance_name,
                         self._start_slaves, i)
Code example #9
    def _start_zookeeper_processes(self, zk_instances):
        if len(zk_instances) == 0:
            return

        utils.add_provisioning_step(
            zk_instances[0].cluster_id,
            utils.start_process_event_message("Zookeeper"), len(zk_instances))

        with context.PluginsThreadGroup() as tg:
            for i in zk_instances:
                tg.spawn('storm-start-zk-%s' % i.instance_name,
                         self._start_zookeeper, i)
Code example #10
    def _start_datanode_processes(self, dn_instances):
        if len(dn_instances) == 0:
            return

        utils.add_provisioning_step(
            dn_instances[0].cluster_id,
            utils.start_process_event_message("DataNodes"), len(dn_instances))

        with context.PluginsThreadGroup() as tg:
            for i in dn_instances:
                tg.spawn('spark-start-dn-%s' % i.instance_name,
                         self._start_datanode, i)
Code example #11
    def configure_swift(self, cluster, instances=None):
        if self.c_helper.is_swift_enabled(cluster):
            if not instances:
                instances = u.get_instances(cluster)
            u.add_provisioning_step(
                cluster.id, _("Configure Swift"), len(instances))

            with context.PluginsThreadGroup() as tg:
                for i in instances:
                    tg.spawn('cdh-swift-conf-%s' % i.instance_name,
                             self._configure_swift_to_inst, i)
            swift_helper.install_ssl_certs(instances)
Code example #12
    def post_install(self, cluster_context, instances):
        LOG.debug('Initializing MapR FS')
        instances = instances or cluster_context.get_instances()
        file_servers = cluster_context.filter_instances(instances, FILE_SERVER)
        utils.add_provisioning_step(cluster_context.cluster.id,
                                    _("Initializing MapR-FS"),
                                    len(file_servers))
        with context.PluginsThreadGroup() as tg:
            for instance in file_servers:
                tg.spawn('init-mfs-%s' % instance.id, self._init_mfs_instance,
                         instance)
        LOG.info('MapR FS successfully initialized')
Code example #13
    def _push_configs_to_nodes(self, cluster, extra, new_instances):
        all_instances = utils.get_instances(cluster)
        utils.add_provisioning_step(cluster.id, _("Push configs to nodes"),
                                    len(all_instances))

        with context.PluginsThreadGroup() as tg:
            for instance in all_instances:
                if instance in new_instances:
                    tg.spawn('storm-configure-%s' % instance.instance_name,
                             self._push_configs_to_new_node, cluster, extra,
                             instance)
                else:
                    tg.spawn('storm-reconfigure-%s' % instance.instance_name,
                             self._push_configs_to_existing_node, cluster,
                             extra, instance)
Code example #14
def refresh_zk_servers(cluster, to_delete_instances=None):
    instances = vu.get_zk_servers(cluster)
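    # exclude ZooKeeper instances that are scheduled for deletion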
    if to_delete_instances:
        for instance in to_delete_instances:
            if instance in instances:
                instances.remove(instance)

    utils.add_provisioning_step(cluster.id,
                                utils.start_process_event_message("ZooKeeper"),
                                len(instances))

    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
                tg.spawn('ZK-restart-processes-%s' % instance.instance_name,
                         _start_zk_processes, instance, 'restart')
Code example #15
def start_dn_nm_processes(instances):
    filternames = ['datanode', 'nodemanager']
    instances = utils.instances_with_services(instances, filternames)

    if len(instances) == 0:
        return

    utils.add_provisioning_step(
        instances[0].cluster_id,
        utils.start_process_event_message("DataNodes, NodeManagers"),
        len(instances))

    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
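                # limit to the processes this node group actually runs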
                processes = set(instance.node_group.node_processes)
                processes = processes.intersection(filternames)
                tg.spawn('vanilla-start-processes-%s' % instance.instance_name,
                         _start_processes, instance, list(processes))
Code example #16
    def _configure_sh_cluster(self, cluster_context, instances):
        LOG.debug('Executing configure.sh')

        if not instances:
            instances = cluster_context.get_instances()
        script = cluster_context.configure_sh

        db_specs = dict(mysql.MySQL.METRICS_SPECS._asdict())
        db_specs.update({
            'host': mysql.MySQL.get_db_instance(cluster_context).internal_ip,
            'port': mysql.MySQL.MYSQL_SERVER_PORT,
        })

        with context.PluginsThreadGroup() as tg:
            for instance in instances:
                tg.spawn('configure-sh-%s' % instance.id,
                         self._configure_sh_instance, cluster_context,
                         instance, script, db_specs)
        LOG.debug('Executing configure.sh successfully completed')
Code example #17
def provision_keypairs(cluster, instances=None):
    extra = cluster.extra.to_dict() if cluster.extra else {}
    # use same keypair for scaling
    keypair = extra.get('vanilla_keypair')
    if not instances:
        instances = utils.get_instances(cluster)
    else:
        # scaling
        if not keypair:
            # cluster created before mitaka, skipping provisioning
            return
    if not keypair:
        private, public = utils.generate_key_pair()
        keypair = {'public': public, 'private': private}
        extra['vanilla_keypair'] = keypair
        extra['vanilla_keypair']['private'] = _store_secret(keypair['private'])
        conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
    else:
        keypair['private'] = _get_secret(keypair['private'])
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            tg.spawn('provision-key-%s' % instance.instance_name,
                     _provision_key, instance, keypair)
Code example #18
    def _stop_nodes(self, instances, sys_service):
        with context.PluginsThreadGroup() as tg:
            for instance in instances:
                tg.spawn('stop-%s-%s' % (sys_service, instance.id),
                         self._stop_service, instance, sys_service)
Code example #19
def execute_on_instances(instances, function, *args, **kwargs):
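    # run `function` concurrently, one thread per instance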
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            t_name = '%s-execution' % function.__name__
            tg.spawn(t_name, function, instance, *args, **kwargs)