def configure_general_environment(self, cluster_context, instances=None):
    LOG.debug('Executing post configure hooks')
    mapr_user_pass = pu.get_mapr_password(cluster_context.cluster)

    if not instances:
        instances = cluster_context.get_instances()

    def set_user_password(instance):
        LOG.debug('Setting password for user "mapr"')
        if self.mapr_user_exists(instance):
            with instance.remote() as r:
                r.execute_command(
                    'echo "%s:%s"|chpasswd' % ('mapr', mapr_user_pass),
                    run_as_root=True)
        else:
            LOG.warning('User "mapr" does not exist')

    def create_home_mapr(instance):
        target_path = '/home/mapr'
        LOG.debug("Creating home directory for user 'mapr'")
        args = {'path': target_path, 'user': 'mapr', 'group': 'mapr'}
        cmd = ('mkdir -p %(path)s && chown %(user)s:%(group)s %(path)s'
               % args)
        if self.mapr_user_exists(instance):
            with instance.remote() as r:
                r.execute_command(cmd, run_as_root=True)
        else:
            LOG.warning('User "mapr" does not exist')

    util.execute_on_instances(instances, set_user_password)
    util.execute_on_instances(instances, create_home_mapr)
def _install_security_repos(self, cluster_context, instances):
    LOG.debug("Installing security repos")

    @el.provision_event()
    def install_security_repos(instance):
        return util.run_script(instance, ADD_SECURITY_REPO_SCRIPT, "root")

    util.execute_on_instances(instances, install_security_repos)
def _install_mapr_repo(self, cluster_context, instances):
    distro_name = cluster_context.distro.name

    @el.provision_event()
    def install_mapr_repos(instance):
        return util.run_script(instance, ADD_MAPR_REPO_SCRIPT, "root",
                               distro_name, **cluster_context.mapr_repos)

    util.execute_on_instances(instances, install_mapr_repos)
def _configure_ssh_connection(self, cluster_context, instances):
    @el.provision_event()
    def configure_ssh(instance):
        echo_param = 'echo "KeepAlive yes" >> ~/.ssh/config'
        echo_timeout = 'echo "ServerAliveInterval 60" >> ~/.ssh/config'
        with instance.remote() as r:
            r.execute_command(echo_param)
            r.execute_command(echo_timeout)

    util.execute_on_instances(instances, configure_ssh)
def _install_java(self, cluster_context, instances):
    LOG.debug("Installing Java")
    distro_name = cluster_context.distro.name

    @el.provision_event()
    def install_java(instance):
        return util.run_script(instance, INSTALL_JAVA_SCRIPT, "root",
                               distro_name)

    util.execute_on_instances(instances, install_java)
def _install_mysql_client(self, cluster_context, instances):
    LOG.debug("Installing MySQL client")
    distro_name = cluster_context.distro.name

    @el.provision_event()
    def install_mysql_client(instance):
        return util.run_script(instance, INSTALL_MYSQL_CLIENT, "root",
                               distro_name)

    util.execute_on_instances(instances, install_mysql_client)
def _set_cluster_mode(self, cluster_context, instances):
    cluster_mode = cluster_context.cluster_mode
    if not cluster_mode:
        return
    command = "maprcli cluster mapreduce set -mode %s" % cluster_mode

    @el.provision_event()
    def set_cluster_mode(instance):
        return util.execute_command([instance], command, run_as='mapr')

    util.execute_on_instances(instances, set_cluster_mode)
def _write_config_files(self, cluster_context, instances):
    LOG.debug('Writing config files')

    @el.provision_event()
    def write_config_files(instance, config_files):
        for file in config_files:
            util.write_file(instance, file.path, file.data,
                            mode=file.mode, owner="mapr")

    node_groups = util.unique_list(instances, lambda i: i.node_group)
    for node_group in node_groups:
        config_files = cluster_context.get_config_files(node_group)
        ng_instances = [i for i in node_group.instances if i in instances]
        util.execute_on_instances(ng_instances, write_config_files,
                                  config_files=config_files)

    LOG.debug("Config files are successfully written")
def _wait_for_status(self, instances, status, sleep=3, timeout=60):
    def poll_status(instance):
        operation_name = _('Wait for {node_process} on {instance}'
                           ' to change status to "{status}"')
        args = {
            'node_process': self.ui_name,
            'instance': instance.instance_name,
            'status': status.name,
        }
        return plugin_utils.poll(
            get_status=lambda: self.status(instance) == status,
            operation_name=operation_name.format(**args),
            timeout=timeout,
            sleep=sleep,
        )

    util.execute_on_instances(instances, poll_status)
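# Illustrative only: a minimal, standalone sketch of the polling loop that
# _wait_for_status delegates to via plugin_utils.poll, assuming nothing about
# sahara's helper beyond the get_status/operation_name/timeout/sleep
# parameters used above. The name _poll_until and its exact behaviour are
# hypothetical, not sahara API.
import time


def _poll_until(get_status, operation_name, timeout=60, sleep=3):
    """Call get_status() every `sleep` seconds until it returns True.

    Raises TimeoutError if the condition is not met within `timeout` seconds.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if get_status():
            return
        time.sleep(sleep)
    raise TimeoutError('%s: timed out after %s seconds'
                       % (operation_name, timeout))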
def _configure_topology(self, cluster_context, instances):
    LOG.debug("Configuring cluster topology")

    topology_map = cluster_context.topology_map
    topology_map = ("%s %s" % item for item in topology_map.items())
    topology_map = "\n".join(topology_map) + "\n"

    data_path = "%s/topology.data" % cluster_context.mapr_home
    script = utils.get_file_text(_TOPO_SCRIPT, 'sahara_plugin_mapr')
    script_path = '%s/topology.sh' % cluster_context.mapr_home

    @el.provision_event()
    def write_topology_data(instance):
        util.write_file(instance, data_path, topology_map, owner="root")
        util.write_file(instance, script_path, script,
                        mode="+x", owner="root")

    util.execute_on_instances(instances, write_topology_data)
    LOG.info('Cluster topology successfully configured')
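# For reference, the topology.data written above contains one "<key> <value>"
# pair from cluster_context.topology_map per line. The values below are
# hypothetical, shown only to illustrate the format:
#
#   192.168.1.10 /rack1
#   192.168.1.11 /rack2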
def _install(_context, _instances):
    g.execute_on_instances(_instances,
                           self._install_packages_on_instance,
                           _context)
def _rebuild(self, cluster_context, instances):
    OOZIE.stop(list(filter(OOZIE.is_started, instances)))
    g.execute_on_instances(instances, self._rebuild_oozie_war,
                           cluster_context)
    OOZIE.start(instances)
    context.sleep(OOZIE_START_DELAY)
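# Illustrative only: every hook above fans a per-instance callable out via
# util.execute_on_instances / g.execute_on_instances. A minimal sketch of
# that pattern using a plain thread pool; the real sahara helper may differ
# (it runs callables through its own thread-group/context machinery), and
# the name _execute_on_instances_sketch is hypothetical.
from concurrent import futures


def _execute_on_instances_sketch(instances, func, *args, **kwargs):
    """Run func(instance, *args, **kwargs) for every instance in parallel."""
    with futures.ThreadPoolExecutor(max_workers=len(instances) or 1) as pool:
        tasks = [pool.submit(func, instance, *args, **kwargs)
                 for instance in instances]
        # Wait for all tasks; re-raise the first exception, if any.
        return [task.result() for task in tasks]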