def add_hadoop_swift_jar(instances):
    """Install the Hadoop Swift integration jar on every instance.

    Registers one provisioning step for the whole batch, then copies the
    jar to each instance sequentially.
    """
    jar_path = "/opt/hadoop-openstack.jar"
    plugin_utils.add_provisioning_step(
        instances[0].cluster.id,
        _("Add Hadoop Swift jar to instances"),
        len(instances))
    for instance in instances:
        _add_hadoop_swift_jar(instance, jar_path)
def configure_os(self, instances):
    """Configure the OS package repositories on all instances in parallel.

    ``instances`` is assumed to be non-empty.
    """
    u.add_provisioning_step(
        instances[0].cluster_id, _("Configure OS"), len(instances))
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            tg.spawn('cdh-repo-conf-%s' % instance.instance_name,
                     self._configure_repo_from_inst, instance)
def install_packages(self, instances, packages):
    """Install the given packages on every instance in parallel.

    ``instances`` is assumed to be non-empty.
    """
    u.add_provisioning_step(
        instances[0].cluster_id, _("Install packages"), len(instances))
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            tg.spawn('cdh-inst-pkgs-%s' % instance.instance_name,
                     self._install_pkgs, instance, packages)
def _setup_agents(instances, manager_address):
    """Install and configure the Ambari agent on each instance in parallel.

    Every agent is pointed at ``manager_address`` (the Ambari server).
    """
    plugin_utils.add_provisioning_step(
        instances[0].cluster.id, _("Set up Ambari agents"), len(instances))
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            tg.spawn("hwx-agent-setup-%s" % instance.id,
                     _setup_agent, instance, manager_address)
    LOG.debug("Ambari agents have been installed")
def update_configs(self, instances):
    """Push updated configuration to every instance in parallel.

    ``instances`` is assumed to be non-empty.  A one-second pause between
    spawns staggers the per-instance config updates.
    """
    utils.add_provisioning_step(
        instances[0].cluster_id, _("Update configs"), len(instances))
    with context.PluginsThreadGroup() as tg:
        for inst in instances:
            tg.spawn("update-configs-%s" % inst.instance_name,
                     self._update_configs, inst)
            context.sleep(1)
def stop(self, cluster_context, instances=None):
    """Stop ZooKeeper and then Warden services on the given instances.

    When ``instances`` is not supplied (or empty), all cluster instances
    are used.  Each phase is reported as its own provisioning step.
    """
    instances = instances or cluster_context.get_instances()
    zookeepers = cluster_context.filter_instances(instances, mng.ZOOKEEPER)
    utils.add_provisioning_step(
        cluster_context.cluster.id, _("Stop ZooKeepers nodes"),
        len(zookeepers))
    self._stop_zk_nodes(zookeepers)
    utils.add_provisioning_step(
        cluster_context.cluster.id, _("Stop Warden nodes"), len(instances))
    self._stop_warden_on_nodes(instances)
def configure_instances(pctx, instances):
    """Configure each instance sequentially within its own instance context.

    :param pctx: plugin context passed through to ``_configure_instance``
    :param instances: instances to configure; no-op when empty
    """
    # Idiomatic truthiness check instead of ``len(instances) == 0``.
    if not instances:
        return
    utils.add_provisioning_step(
        instances[0].cluster_id, _("Configure instances"), len(instances))
    for instance in instances:
        with context.set_current_instance_id(instance.instance_id):
            _configure_instance(pctx, instance)
def start_zk_server(instances):
    """Start the ZooKeeper server process on every instance in parallel."""
    utils.add_provisioning_step(
        instances[0].cluster_id,
        utils.start_process_event_message("ZooKeeper"),
        len(instances))
    with context.PluginsThreadGroup() as tg:
        for inst in instances:
            with context.set_current_instance_id(inst.instance_id):
                tg.spawn('ZK-start-processes-%s' % inst.instance_name,
                         _start_zk_processes, inst, 'start')
def start_cloudera_agents(self, instances):
    """Start the Cloudera agent on every instance in parallel.

    ``instances`` is assumed to be non-empty.
    """
    u.add_provisioning_step(
        instances[0].cluster_id, _("Start Cloudera Agents"), len(instances))
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            tg.spawn('cdh-agent-start-%s' % instance.instance_name,
                     self._start_cloudera_agent, instance)
def _start_slave_processes(self, sl_instances):
    """Start Storm slave (supervisor) processes in parallel.

    :param sl_instances: instances hosting slaves; no-op when empty
    """
    # Idiomatic truthiness check instead of ``len(sl_instances) == 0``.
    if not sl_instances:
        return
    utils.add_provisioning_step(
        sl_instances[0].cluster_id,
        utils.start_process_event_message("Slave"),
        len(sl_instances))
    with context.PluginsThreadGroup() as tg:
        for instance in sl_instances:
            tg.spawn('storm-start-sl-%s' % instance.instance_name,
                     self._start_slaves, instance)
def wrapped(*args, **kwargs):
    """Register a provisioning step for the call, then invoke the function.

    The cluster context and instance list are located among the wrapped
    function's arguments via the configured references.
    """
    cluster_context = _find_argument(
        cluster_context_reference, *args, **kwargs)
    instances = _find_argument(instances_reference, *args, **kwargs)
    utils.add_provisioning_step(
        cluster_context.cluster.id, name, len(instances))
    return function(*args, **kwargs)
def configure_rack_awareness(cluster, instances):
    """Push rack-topology data to instances and restart affected services.

    No-op when data locality is disabled for the deployment.
    """
    if not t_helper.is_data_locality_enabled():
        return
    with _get_ambari_client(cluster) as client:
        plugin_utils.add_provisioning_step(
            cluster.id, _("Configure rack awareness"), len(instances))
        for instance in instances:
            _configure_topology_data(cluster, instance, client)
        _restart_hdfs_and_mapred_services(cluster, client)
def _start_datanode_processes(self, dn_instances):
    """Start DataNode processes in parallel.

    :param dn_instances: instances hosting DataNodes; no-op when empty
    """
    # Idiomatic truthiness check instead of ``len(dn_instances) == 0``.
    if not dn_instances:
        return
    utils.add_provisioning_step(
        dn_instances[0].cluster_id,
        utils.start_process_event_message("DataNodes"),
        len(dn_instances))
    with context.PluginsThreadGroup() as tg:
        for instance in dn_instances:
            tg.spawn('spark-start-dn-%s' % instance.instance_name,
                     self._start_datanode, instance)
def _start_zookeeper_processes(self, zk_instances):
    """Start ZooKeeper processes in parallel.

    :param zk_instances: instances hosting ZooKeeper; no-op when empty
    """
    # Idiomatic truthiness check instead of ``len(zk_instances) == 0``.
    if not zk_instances:
        return
    utils.add_provisioning_step(
        zk_instances[0].cluster_id,
        utils.start_process_event_message("Zookeeper"),
        len(zk_instances))
    with context.PluginsThreadGroup() as tg:
        for instance in zk_instances:
            tg.spawn('storm-start-zk-%s' % instance.instance_name,
                     self._start_zookeeper, instance)
def configure_swift(self, cluster, instances=None):
    """Configure Swift support on cluster instances when it is enabled.

    Falls back to all cluster instances if ``instances`` is not given,
    configures each in parallel, then installs the SSL certificates
    needed for Swift access.
    """
    if not self.c_helper.is_swift_enabled(cluster):
        return
    instances = instances or u.get_instances(cluster)
    u.add_provisioning_step(
        cluster.id, _("Configure Swift"), len(instances))
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            tg.spawn('cdh-swift-conf-%s' % instance.instance_name,
                     self._configure_swift_to_inst, instance)
    swift_helper.install_ssl_certs(instances)
def post_install(self, cluster_context, instances):
    """Initialize MapR-FS on all file-server instances in parallel.

    When ``instances`` is falsy, all cluster instances are considered.
    """
    LOG.debug('Initializing MapR FS')
    instances = instances or cluster_context.get_instances()
    file_servers = cluster_context.filter_instances(instances, FILE_SERVER)
    utils.add_provisioning_step(
        cluster_context.cluster.id, _("Initializing MapR-FS"),
        len(file_servers))
    with context.PluginsThreadGroup() as tg:
        for fs_instance in file_servers:
            tg.spawn('init-mfs-%s' % fs_instance.id,
                     self._init_mfs_instance, fs_instance)
    LOG.info('MapR FS successfully initialized')
def wrapped(*args, **kwargs):
    """Run ``function`` as a tracked provisioning event for one instance.

    Records a success event on normal return and a failure event on
    exception, then re-raises the original exception.
    """
    event_instance = instance or _find_argument(
        instance_reference, *args, **kwargs)
    if name:
        utils.add_provisioning_step(
            event_instance.node_group.cluster.id, name, 1)
    try:
        result = function(*args, **kwargs)
        utils.add_successful_event(event_instance)
        return result
    except Exception as exception:
        utils.add_fail_event(event_instance, exception)
        # Bare ``raise`` re-raises with the original traceback intact;
        # ``raise exception`` would restart the traceback at this frame.
        raise
def _push_configs_to_nodes(self, cluster, extra, new_instances):
    """Push configuration to every cluster node in parallel.

    Instances in ``new_instances`` get a full configuration push; all
    other (existing) nodes are only reconfigured.
    """
    all_instances = utils.get_instances(cluster)
    utils.add_provisioning_step(
        cluster.id, _("Push configs to nodes"), len(all_instances))
    with context.PluginsThreadGroup() as tg:
        for instance in all_instances:
            if instance in new_instances:
                label = 'storm-configure-%s' % instance.instance_name
                handler = self._push_configs_to_new_node
            else:
                label = 'storm-reconfigure-%s' % instance.instance_name
                handler = self._push_configs_to_existing_node
            tg.spawn(label, handler, cluster, extra, instance)
def refresh_zk_servers(cluster, to_delete_instances=None):
    """Restart ZooKeeper processes on the cluster's ZK server instances.

    Instances listed in ``to_delete_instances`` are excluded from the
    restart.
    """
    instances = vu.get_zk_servers(cluster)
    if to_delete_instances:
        instances = [inst for inst in instances
                     if inst not in to_delete_instances]
    utils.add_provisioning_step(
        cluster.id, utils.start_process_event_message("ZooKeeper"),
        len(instances))
    with context.PluginsThreadGroup() as tg:
        for inst in instances:
            with context.set_current_instance_id(inst.instance_id):
                tg.spawn('ZK-restart-processes-%s' % inst.instance_name,
                         _start_zk_processes, inst, 'restart')
def start_dn_nm_processes(instances):
    """Start DataNode/NodeManager processes where those services run.

    Filters ``instances`` down to those hosting a datanode or nodemanager
    and starts only the matching processes on each, in parallel.
    """
    filternames = ['datanode', 'nodemanager']
    instances = utils.instances_with_services(instances, filternames)
    # Idiomatic truthiness check instead of ``len(instances) == 0``.
    if not instances:
        return
    utils.add_provisioning_step(
        instances[0].cluster_id,
        utils.start_process_event_message("DataNodes, NodeManagers"),
        len(instances))
    with context.PluginsThreadGroup() as tg:
        for instance in instances:
            with context.set_current_instance_id(instance.instance_id):
                # Start only the subset of target processes this node runs.
                processes = set(instance.node_group.node_processes)
                processes = processes.intersection(filternames)
                tg.spawn('vanilla-start-processes-%s' % instance.instance_name,
                         _start_processes, instance, list(processes))
def start(self, cluster_context, instances=None):
    """Start cluster services in order: ZooKeeper, CLDB, then the rest.

    When ``instances`` is falsy, all cluster instances are used.  After
    the non-CLDB nodes are started, waits for CLDB availability.
    """
    instances = instances or cluster_context.get_instances()
    zookeepers = cluster_context.filter_instances(instances, mng.ZOOKEEPER)
    cldbs = cluster_context.filter_instances(instances, mfs.CLDB)
    # BUG FIX: the original kept ``others`` as a ``filter`` iterator.
    # A filter object is always truthy, so ``if others:`` never skipped
    # the step, and ``len(list(others))`` exhausted the iterator before
    # ``_start_non_cldb_nodes(others)`` ran, handing it an empty
    # iterator on Python 3.  Materialize the list once instead.
    others = [i for i in instances
              if not cluster_context.check_for_process(i, mfs.CLDB)]
    utils.add_provisioning_step(
        cluster_context.cluster.id, _("Start ZooKeepers nodes"),
        len(zookeepers))
    self._start_zk_nodes(zookeepers)
    utils.add_provisioning_step(
        cluster_context.cluster.id, _("Start CLDB nodes"), len(cldbs))
    self._start_cldb_nodes(cldbs)
    if others:
        utils.add_provisioning_step(
            cluster_context.cluster.id, _("Start non-CLDB nodes"),
            len(others))
        self._start_non_cldb_nodes(others)
    self._await_cldb(cluster_context, instances)
def add_new_hosts(cluster, instances):
    """Register each new instance as a host in the Ambari cluster."""
    with _get_ambari_client(cluster) as client:
        plugin_utils.add_provisioning_step(
            cluster.id, _("Add new hosts"), len(instances))
        for instance in instances:
            _add_host_to_cluster(instance, client)
def configure_instances(self, instances, cluster=None):
    """Configure each instance sequentially.

    ``instances`` is assumed to be non-empty.
    """
    utils.add_provisioning_step(
        instances[0].cluster_id, _("Configure instances"), len(instances))
    for instance in instances:
        self.configure_instance(instance, cluster)