def configure_htcondor(self):
    """
    Configure environment for running HTCondor service over a node.

    Fills in the master or worker HTCondor config template (chosen by
    ``self.srv_type``), appends the result to the HTCondor config file,
    and restarts the condor service.

    :rtype: bool
    :return: ``True`` if the service was configured and restarted;
             ``False`` otherwise. (BUGFIX: the original computed this
             flag but never returned it.)
    """
    all_done = False
    try:
        htcondor_params = {}
        if self.srv_type == "master":
            condor_template = conf_manager.load_conf_template(
                conf_manager.HTCONDOR_MASTER_CONF_TEMPLATE)
            htcondor_params["flock_host"] = self.flock_to
        else:
            # NOTE: 'WOORKER' is a typo but matches the constant name
            # defined in conf_manager, so it must be kept as-is here.
            condor_template = conf_manager.load_conf_template(
                conf_manager.HTCONDOR_WOORKER_CONF_TEMPLATE)
            htcondor_params = {"host": self.host}
        log.debug("HTCondor params: {0}".format(str(htcondor_params)))
        condor_template = condor_template.substitute(htcondor_params)
        if os.path.exists(paths.P_HTCONDOR_CONFIG_PATH):
            with open(paths.P_HTCONDOR_CONFIG_PATH, 'a') as f:
                f.write("%s\n" % condor_template)
            misc.run(paths.P_HTCONDOR_HOME + "/condor restart")
            all_done = True
            self.state = service_states.RUNNING
        else:
            log.error("HTCondor config file {0} not found!"
                      .format(paths.P_HTCONDOR_CONFIG_PATH))
    except Exception as e:
        # BUGFIX: log at error level - the service is placed in ERROR
        # state here, so the cause must be visible in the logs.
        log.error("Error while configuring HTCondor: {0}".format(e))
        self.state = service_states.ERROR
        all_done = False
    return all_done
def configure_htcondor(self):
    """
    Configure environment for running HTCondor service over a node.

    Depending on ``self.srv_type``, loads the master or worker condor
    config template, fills it in, appends it to the HTCondor config file
    and restarts the condor service.
    """
    all_done = False
    try:
        if self.srv_type == "master":
            tmplt = conf_manager.load_conf_template(
                conf_manager.HTCONDOR_MASTER_CONF_TEMPLATE)
            htcondor_params = {"flock_host": self.flock_to}
        else:
            tmplt = conf_manager.load_conf_template(
                conf_manager.HTCONDOR_WOORKER_CONF_TEMPLATE)
            htcondor_params = {"host": self.host}
        log.debug("HTCondor params: {0}".format(str(htcondor_params)))
        filled = tmplt.substitute(htcondor_params)
        if not os.path.exists(paths.P_HTCONDOR_CONFIG_PATH):
            log.error("HTCondor config file {0} not found!".format(
                paths.P_HTCONDOR_CONFIG_PATH))
        else:
            with open(paths.P_HTCONDOR_CONFIG_PATH, 'a') as conf:
                conf.write("%s\n" % filled)
            misc.run(paths.P_HTCONDOR_HOME + "/condor restart")
            all_done = True
            self.state = service_states.RUNNING
    except Exception as e:
        log.debug("Error while configuring HTCondor: {0}".format(e))
        self.state = service_states.ERROR
        all_done = False
def _get_sge_install_conf(app, host):
    """
    Return the filled-in SGE_INSTALL_TEMPLATE as a string.

    Positional arguments:
    app -- the CloudMan app; any ``sge_<key>`` value on ``app.config``
           overrides the default ``<key>`` below
    host -- used for the admin, submit and exec host lists
    """
    template = conf_manager.load_conf_template(
        conf_manager.SGE_INSTALL_TEMPLATE)
    # The master starts out as an execution host; additional execution
    # hosts are registered later, as worker instances come up.
    params = {
        "cluster_name": "GalaxyEC2",
        "admin_host_list": host,
        "submit_host_list": host,
        "exec_host_list": host,
        "hostname_resolving": "true",
    }
    # User-supplied 'sge_*' config values take precedence over defaults
    for key, value in app.config.iteritems():
        if key.startswith("sge_"):
            params[key[len("sge_"):]] = value
    return template.substitute(params)
def configure_proftpd(self):
    """
    Configure environment for running ProFTPd service.

    (Re)sets the PostgreSQL password for the ``galaxyftp`` role, rewrites
    the ProFTPd config file from its template, prepares the FTP data dir,
    stops vsFTPd if it is running, and starts the ProFTPd server.
    """
    log.debug("Configuring ProFTPd")
    # The proftpd config file is rewritten below, so update the password
    # for the PostgreSQL galaxyftp user to match it.
    gftp_pwd = self.app.path_resolver.proftpd_galaxyftp_user_pwd
    log.debug("Setting psql password for galaxyftp role to {0}".format(gftp_pwd))
    # Does the galaxyftp role already exist?
    check_cmd = (
        '{0} - postgres -c"{1} -p {2} -tAc\\\"SELECT 1 FROM pg_roles WHERE rolname=\'galaxyftp\'\\\""'
        .format(paths.P_SU, self.app.path_resolver.psql_cmd,
                self.app.path_resolver.psql_db_port))
    role = 'ALTER' if misc.getoutput(check_cmd) == '1' else 'CREATE'
    # CREATE or ALTER the galaxyftp role, setting the password either way
    role_cmd = (
        '{0} - postgres -c"{1} -p {4} -c\\\"{2} ROLE galaxyftp LOGIN PASSWORD \'{3}\'\\\""'
        .format(paths.P_SU, self.app.path_resolver.psql_cmd, role, gftp_pwd,
                self.app.path_resolver.psql_db_port))
    misc.run(role_cmd)
    # Fill in the config template to match the current environment
    conf_template = conf_manager.load_conf_template(
        conf_manager.PROFTPD_CONF_TEMPLATE)
    filled_conf = conf_template.substitute({
        'galaxy_user_name': paths.GALAXY_USER_NAME,
        'galaxyftp_user_name': 'galaxyftp',
        'psql_galaxyftp_password': gftp_pwd,
        'galaxy_db_port': self.app.path_resolver.psql_db_port,
        'galaxyFS_base_path': self.app.path_resolver.galaxy_data,
        'public_ip_address': self.app.cloud_interface.get_public_ip()
    })
    # Write out the config file
    with open(self.app.path_resolver.proftpd_conf_file, 'w') as f:
        f.write("%s\n" % filled_conf)
    log.debug("Updated ProFTPd conf file {0}".format(
        self.app.path_resolver.proftpd_conf_file))
    # Place the FTP welcome message file
    urllib.urlretrieve(
        "https://s3.amazonaws.com/cloudman/files/proftpd_welcome.txt",
        "/usr/proftpd/etc/welcome_msg.txt")
    # Make sure the Galaxy data dir used for FTP uploads exists and is
    # owned by the galaxy user
    ftp_data_dir = '%s/tmp/ftp' % self.app.path_resolver.galaxy_data
    if not os.path.exists(ftp_data_dir):
        os.makedirs(ftp_data_dir)
    attempt_chown_galaxy(ftp_data_dir)
    # Some images ship with vsFTPd; stop it so it does not hold the port
    vsftpd_status = misc.run('status vsftpd', quiet=True)
    if vsftpd_status and 'start/running' in vsftpd_status:
        log.debug("Stopping vsFTPd")
        misc.run('stop vsftpd')
    # Start the server now
    if misc.run('/etc/init.d/proftpd start'):
        self.state = service_states.RUNNING
        return True
    log.error("Trouble starting ProFTPd")
    self.state = service_states.ERROR
    return False
def _build_slurm_conf():
    """
    Build and return the contents of ``slurm.conf`` as a string.

    NOTE(review): this function reads ``self`` without taking it as a
    parameter, so it is presumably defined as a closure inside a method;
    confirm against the enclosing scope.
    """
    log.debug("Setting slurm.conf parameters")
    # The slurm tmp root must exist and be owned by the slurm user
    slurm_tmp = self.app.path_resolver.slurm_root_tmp
    misc.make_dir(slurm_tmp)
    os.chown(slurm_tmp, pwd.getpwnam("slurm")[2], grp.getgrnam("slurm")[2])
    worker_nodes, worker_names = _worker_nodes_conf()
    template = conf_manager.load_conf_template(
        conf_manager.SLURM_CONF_TEMPLATE)
    return template.substitute({
        "master_hostname": misc.get_hostname(),
        "num_cpus": max(self.app.manager.num_cpus - 1, 1),  # Reserve 1 CPU
        "total_memory": max(1, self.app.manager.total_memory / 1024),
        "slurm_root_tmp": slurm_tmp,
        "worker_nodes": worker_nodes,
        "worker_names": worker_names
    })
def _configure(self):
    """
    Setup Supervisor for running via CloudMan by creating the
    ``supervisord.conf`` file and ensuring the program config dir exists.
    """
    log.debug("Configuring supervisord")
    template = conf_manager.load_conf_template(
        conf_manager.SUPERVISOR_TEMPLATE)
    # Values substituted into the supervisord.conf template
    sv_vars = dict(
        supervisord_pid=self.pid_file,
        sv_port=self.sv_port,
        conf_dir=self.conf_dir,
        supervisord_log=self.log_file)
    misc.write_template_file(template, sv_vars, self.main_conf_file)
    # Programs managed by supervisor drop their config files in this dir
    misc.make_dir(self.conf_dir)
def _configure(self):
    """
    Create ``supervisord.conf`` from its template so Supervisor can run
    under CloudMan, then make sure the supervisor program config dir
    exists.
    """
    log.debug("Configuring supervisord")
    substitutions = {
        'supervisord_pid': self.pid_file,
        'sv_port': self.sv_port,
        'conf_dir': self.conf_dir,
        'supervisord_log': self.log_file,
    }
    conf_template = conf_manager.load_conf_template(
        conf_manager.SUPERVISOR_TEMPLATE)
    misc.write_template_file(conf_template, substitutions,
                             self.main_conf_file)
    # Holds config files for supervisor-managed programs
    misc.make_dir(self.conf_dir)
def _update_job_conf(app, num_handlers, plugin_id='slurm'):
    """
    Update Galaxy's ``job_conf.xml``; only job handlers are set.

    One ``<handler>`` entry is written for each of ``num_handlers``.
    """
    job_conf_file_path = join(app.path_resolver.galaxy_config_dir,
                              'job_conf.xml')
    log.debug("Updating Galaxy's job conf file {0}".format(job_conf_file_path))
    template = conf_manager.load_conf_template(
        conf_manager.GALAXY_JOB_CONF_TEMPLATE)
    # Assemble one <handler> line per requested handler
    handler_xml = "".join(
        '\t\t<handler id="handler{0}" tags="handlers" />\n'.format(i)
        for i in range(num_handlers))
    misc.write_template_file(template, {'cloudman_handlers': handler_xml},
                             job_conf_file_path)
def _write_template_file(self, template_file, parameters, conf_file):
    """
    Load the plain text `template_file` as a `string.Template`, substitute
    the `parameters` and write the result to the `conf_file` path,
    creating the destination directory if needed.

    A missing template parameter (`KeyError`) is logged as an error
    rather than raised.
    """
    template = conf_manager.load_conf_template(template_file)
    try:
        contents = template.substitute(parameters)
        conf_dir = os.path.dirname(conf_file)
        # Create the conf directory if it is not there yet
        if not os.path.exists(conf_dir):
            log.debug("Configuration path does not exist. Creating path: {0}".format(conf_dir))
            os.makedirs(conf_dir)
        # Write out the file
        with open(conf_file, 'w') as f:
            f.write("%s\n" % contents)
        # NOTE(review): message mentions Nginx although this helper looks
        # generic - presumably it lives in the Nginx service; confirm.
        log.debug("Wrote Nginx config file {0}".format(conf_file))
    except KeyError as kexc:
        log.error("KeyError filling template {0}: {1}".format(template_file,
                                                              kexc))
def _setup(self):
    """
    Prepare the Galaxy Reports service: patch the startup script, create
    the default output dirs and write out the reports config file.
    """
    log.debug("Running GalaxyReportsService _setup")
    # WORKAROUND: run_reports.sh passes --safe-pidfile, which the uwsgi
    # binary does not support; replace it with --pidfile.
    misc.run('sudo sed -i "s/--safe-pidfile/--pidfile/g'
             '" %s/scripts/common_startup_functions.sh' % self.galaxy_home)
    # Default dirs for report files and tmp data, owned by galaxy
    for subdir in ("database/files", "database/tmp"):
        misc.make_dir(
            os.path.join(self.app.path_resolver.galaxy_home, subdir),
            owner='galaxy')
    # Create the new reports config
    template = conf_manager.load_conf_template(
        conf_manager.GALAXY_REPORTS_TEMPLATE)
    misc.write_template_file(
        template, {'galaxy_db_port': self.app.path_resolver.psql_db_port},
        self.conf_file)
    attempt_chown_galaxy(self.conf_file)
def _get_sge_install_conf(app, host):
    """
    Fill the SGE_INSTALL_TEMPLATE and return it as a string.

    ``host`` is used for the admin, submit and exec host lists, so the
    master starts out as an execution host (more exec hosts are added
    later as they start). Config values named ``sge_<key>`` on
    ``app.config`` override the corresponding default ``<key>``.
    """
    defaults = {
        "cluster_name": "GalaxyEC2",
        "admin_host_list": host,
        "submit_host_list": host,
        "exec_host_list": host,
        "hostname_resolving": "true",
    }
    overrides = dict((key[len("sge_"):], value)
                     for key, value in app.config.iteritems()
                     if key.startswith("sge_"))
    defaults.update(overrides)
    return conf_manager.load_conf_template(
        conf_manager.SGE_INSTALL_TEMPLATE).substitute(defaults)
def _configure_sge(self):
    """
    Install and configure SGE on this (master) node: write the install
    template, run ``inst_sge``, register the parallel environments, create
    the ``all.q`` queue, and set up users' SGE profiles.

    :rtype: bool
    :return: ``True`` if ``inst_sge`` succeeded; ``False`` otherwise.
    """
    log.info("Setting up SGE...")
    sge_root = self.app.path_resolver.sge_root
    SGE_config_file = '%s/galaxyEC2.conf' % sge_root
    with open(SGE_config_file, 'w') as f:
        f.write("%s\n" % _get_sge_install_conf(
            self.app, self.app.cloud_interface.get_private_ip()))
    os.chown(SGE_config_file, pwd.getpwnam("sgeadmin")[2],
             grp.getgrnam("sgeadmin")[2])
    log.debug("Created SGE install template as file '%s'" % SGE_config_file)
    fix_libc()
    log.debug("Setting up SGE.")
    self._fix_util_arch()
    # Guard clause: bail out if the SGE installer itself fails
    if not misc.run('cd %s; ./inst_sge -m -x -auto %s'
                    % (sge_root, SGE_config_file),
                    "Setting up SGE did not go smoothly",
                    "Successfully set up SGE"):
        return False
    log.debug("Successfully setup SGE; configuring SGE")
    log.debug("Adding parallel environments")
    for pe in ('SGE_SMP_PE', 'SGE_MPI_PE'):
        pe_file_path = os.path.join('/tmp', pe)
        with open(pe_file_path, 'w') as f:
            f.write("%s\n" % conf_manager.load_conf_template(
                getattr(conf_manager, pe)).safe_substitute())
        misc.run('cd %s; ./bin/lx24-amd64/qconf -Ap %s'
                 % (sge_root, pe_file_path))
    log.debug("Creating queue 'all.q'")
    SGE_allq_file = '%s/all.q.conf' % sge_root
    all_q_template = conf_manager.load_conf_template(
        conf_manager.SGE_ALL_Q_TEMPLATE)
    all_q_params = {
        "slots": int(commands.getoutput("nproc")),
        "prolog_path": 'NONE',
        "epilog_path": 'NONE'
    }
    if self.app.config.hadoop_enabled:
        # With Hadoop, HDFS is started/stopped around each queued job
        all_q_params["prolog_path"] = os.path.join(
            paths.P_HADOOP_HOME,
            paths.P_HADOOP_INTEGRATION_FOLDER + "/hdfsstart.sh")
        all_q_params["epilog_path"] = os.path.join(
            paths.P_HADOOP_HOME,
            paths.P_HADOOP_INTEGRATION_FOLDER + "/hdfsstop.sh")
    with open(SGE_allq_file, 'w') as f:
        f.write("%s\n" % all_q_template.substitute(all_q_params))
    os.chown(SGE_allq_file, pwd.getpwnam("sgeadmin")[2],
             grp.getgrnam("sgeadmin")[2])
    log.debug("Created SGE all.q template as file '%s'" % SGE_allq_file)
    misc.run('cd %s; ./bin/lx24-amd64/qconf -Mq %s'
             % (sge_root, SGE_allq_file),
             "Error modifying all.q", "Successfully modified all.q")
    log.debug("Configuring users' SGE profiles")
    misc.append_to_file(paths.LOGIN_SHELL_SCRIPT,
                        "\nexport SGE_ROOT=%s" % sge_root)
    misc.append_to_file(paths.LOGIN_SHELL_SCRIPT,
                        "\n. $SGE_ROOT/default/common/settings.sh\n")
    # Write out the .sge_request file for individual users
    sge_request_template = conf_manager.load_conf_template(
        conf_manager.SGE_REQUEST_TEMPLATE)
    sge_request_params = {
        'psql_home': self.app.path_resolver.pg_home,
        'galaxy_tools_dir': self.app.path_resolver.galaxy_tools,
    }
    for user in ('galaxy', 'ubuntu'):
        sge_request_file = os.path.join('/home', user, '.sge_request')
        with open(sge_request_file, 'w') as f:
            f.write("%s\n" % sge_request_template.substitute(
                sge_request_params))
        os.chown(sge_request_file, pwd.getpwnam(user)[2],
                 grp.getgrnam(user)[2])
    return True
def _add_instance_as_exec_host(self, inst_alias, inst_private_ip):
    """
    Add instance with ``inst_alias`` and ``inst_private_ip`` to the SGE
    execution host list and to the ``@allhosts`` host group.

    ``inst_alias`` is used only in log statements while the
    ``inst_private_ip`` is the IP address (or hostname) of the given
    instance, which must be visible (i.e., accessible) to the other
    nodes in the clusters.

    :rtype: bool
    :return: ``True`` if every step succeeded or was already done.
    """
    ok = True
    # Check if host is already in the exec host list
    cmd = "export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; %s/bin/lx24-amd64/qconf -sel" \
        % (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root)
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if inst_private_ip in stdout:
        log.debug("Instance '%s' already in SGE execution host list"
                  % inst_alias)
    else:
        log.debug("Adding instance '%s' to SGE execution host list."
                  % inst_alias)
        # Create a dir to hold all of workers host configuration files
        host_conf_dir = "%s/host_confs" % self.app.path_resolver.sge_root
        if not os.path.exists(host_conf_dir):
            subprocess.call('mkdir -p %s' % host_conf_dir, shell=True)
            os.chown(host_conf_dir, pwd.getpwnam("sgeadmin")[2],
                     grp.getgrnam("sgeadmin")[2])
        host_conf_file = os.path.join(host_conf_dir, str(inst_alias))
        with open(host_conf_file, 'w') as f:
            f.write("%s\n" % conf_manager.load_conf_template(
                conf_manager.SGE_HOST_CONF_TEMPLATE).substitute(
                {'hostname': inst_private_ip}))
        os.chown(host_conf_file, pwd.getpwnam("sgeadmin")[2],
                 grp.getgrnam("sgeadmin")[2])
        log.debug("Created SGE host configuration template as file '%s'."
                  % host_conf_file)
        # Add worker instance as execution host to SGE
        cmd = 'export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; %s/bin/lx24-amd64/qconf -Ae %s' \
            % (self.app.path_resolver.sge_root,
               self.app.path_resolver.sge_root, host_conf_file)
        log.debug("Add SGE exec host cmd: {0}".format(cmd))
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # BUGFIX: read the child's output with communicate() *before*
        # checking the exit status. The original called proc.wait() with
        # both stdout and stderr attached to pipes, which can deadlock if
        # qconf fills a pipe buffer (see subprocess docs).
        stdout, stderr = proc.communicate()
        if proc.returncode == 0:
            log.debug("Successfully added instance '%s' w/ private IP '%s' as an execution host."
                      % (inst_alias, inst_private_ip))
        else:
            ok = False
            log.error("Process encountered problems adding instance '%s' as an SGE execution host. "
                      "Process returned code %s"
                      % (inst_alias, proc.returncode))
            log.debug(" - adding instance '%s' SGE execution host stdout (private IP: %s): '%s'"
                      % (inst_alias, inst_private_ip, stdout))
            log.debug(" - adding instance '%s' SGE execution host stderr (private IP: %s): '%s'"
                      % (inst_alias, inst_private_ip, stderr))
    # == Add given instance's hostname to @allhosts
    # Check if instance is already in allhosts file and do not recreate the
    # file if so.
    # Additional documentation: allhosts file can be generated by CloudMan
    # each time an instance is added or removed. The file is generated based
    # on the Instance object CloudMan keeps track of and, as a result, it
    # includes all of the instances listed. So, some instances, although they
    # have yet to go through the addition process, might have had their IPs
    # already included in the allhosts file. This approach ensures consistency
    # between SGE and CloudMan and has been working much better than trying
    # to sync the two via other methods.
    proc = subprocess.Popen(
        "export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; "
        "%s/bin/lx24-amd64/qconf -shgrp @allhosts"
        % (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root),
        shell=True, stdout=subprocess.PIPE)
    allhosts_out = proc.communicate()[0]
    if inst_private_ip not in allhosts_out:
        now = datetime.datetime.utcnow()
        ah_file = '/tmp/ah_add_' + now.strftime("%H_%M_%S")
        self._write_allhosts_file(filename=ah_file, to_add=inst_private_ip)
        if not misc.run(
                'export SGE_ROOT=%s;. $SGE_ROOT/default/common/settings.sh; '
                '%s/bin/lx24-amd64/qconf -Mhgrp %s'
                % (self.app.path_resolver.sge_root,
                   self.app.path_resolver.sge_root, ah_file),
                "Problems updating @allhosts aimed at adding '%s'" % inst_alias,
                "Successfully updated @allhosts to add '%s' with address '%s'"
                % (inst_alias, inst_private_ip)):
            ok = False
    else:
        log.debug("Instance '%s' IP is already in SGE's @allhosts"
                  % inst_alias)
    # On instance reboot, SGE might have already been configured for a given
    # instance and this method will fail along the way although the instance
    # will still operate within SGE so don't explicitly state it was added.
    if ok:
        log.debug("Successfully added instance '%s' to SGE" % inst_alias)
    return ok
def _configure_sge(self):
    """
    Set up SGE on this node: write the install config, run ``inst_sge``,
    add the parallel environments and the ``all.q`` queue, and configure
    users' SGE profiles.

    Returns ``True`` when ``inst_sge`` succeeded, ``False`` otherwise.
    """
    log.info("Setting up SGE...")
    install_conf_path = '%s/galaxyEC2.conf' % self.app.path_resolver.sge_root
    with open(install_conf_path, 'w') as conf:
        conf.write("%s\n" % _get_sge_install_conf(
            self.app, self.app.cloud_interface.get_private_ip()))
    sgeadmin_uid = pwd.getpwnam("sgeadmin")[2]
    sgeadmin_gid = grp.getgrnam("sgeadmin")[2]
    os.chown(install_conf_path, sgeadmin_uid, sgeadmin_gid)
    log.debug(
        "Created SGE install template as file '%s'" % install_conf_path)
    fix_libc()
    log.debug("Setting up SGE.")
    self._fix_util_arch()
    if misc.run('cd %s; ./inst_sge -m -x -auto %s'
                % (self.app.path_resolver.sge_root, install_conf_path),
                "Setting up SGE did not go smoothly",
                "Successfully set up SGE"):
        log.debug("Successfully setup SGE; configuring SGE")
        log.debug("Adding parallel environments")
        for pe_name in ['SGE_SMP_PE', 'SGE_MPI_PE']:
            pe_path = os.path.join('/tmp', pe_name)
            with open(pe_path, 'w') as conf:
                conf.write("%s\n" % conf_manager.load_conf_template(
                    getattr(conf_manager, pe_name)).safe_substitute())
            misc.run('cd %s; ./bin/lx24-amd64/qconf -Ap %s'
                     % (self.app.path_resolver.sge_root, pe_path))
        log.debug("Creating queue 'all.q'")
        allq_path = '%s/all.q.conf' % self.app.path_resolver.sge_root
        allq_template = conf_manager.load_conf_template(
            conf_manager.SGE_ALL_Q_TEMPLATE)
        if self.app.config.hadoop_enabled:
            # HDFS gets started/stopped around each job via prolog/epilog
            allq_params = {
                "slots": int(commands.getoutput("nproc")),
                "prolog_path": os.path.join(
                    paths.P_HADOOP_HOME,
                    paths.P_HADOOP_INTEGRATION_FOLDER + "/hdfsstart.sh"),
                "epilog_path": os.path.join(
                    paths.P_HADOOP_HOME,
                    paths.P_HADOOP_INTEGRATION_FOLDER + "/hdfsstop.sh")
            }
        else:
            allq_params = {
                "slots": int(commands.getoutput("nproc")),
                "prolog_path": 'NONE',
                "epilog_path": 'NONE'
            }
        with open(allq_path, 'w') as conf:
            conf.write("%s\n" % allq_template.substitute(allq_params))
        os.chown(allq_path, sgeadmin_uid, sgeadmin_gid)
        log.debug("Created SGE all.q template as file '%s'" % allq_path)
        misc.run('cd %s; ./bin/lx24-amd64/qconf -Mq %s'
                 % (self.app.path_resolver.sge_root, allq_path),
                 "Error modifying all.q", "Successfully modified all.q")
        log.debug("Configuring users' SGE profiles")
        misc.append_to_file(paths.LOGIN_SHELL_SCRIPT,
                            "\nexport SGE_ROOT=%s"
                            % self.app.path_resolver.sge_root)
        misc.append_to_file(paths.LOGIN_SHELL_SCRIPT,
                            "\n. $SGE_ROOT/default/common/settings.sh\n")
        # Write out the .sge_request file for individual users
        request_template = conf_manager.load_conf_template(
            conf_manager.SGE_REQUEST_TEMPLATE)
        request_params = {
            'psql_home': self.app.path_resolver.pg_home,
            'galaxy_tools_dir': self.app.path_resolver.galaxy_tools,
        }
        for user in ['galaxy', 'ubuntu']:
            request_path = os.path.join('/home', user, '.sge_request')
            with open(request_path, 'w') as conf:
                conf.write("%s\n" % request_template.substitute(
                    request_params))
            os.chown(request_path, pwd.getpwnam(user)[2],
                     grp.getgrnam(user)[2])
        return True
    return False
def _add_instance_as_exec_host(self, inst_alias, inst_private_ip):
    """
    Add instance with ``inst_alias`` and ``inst_private_ip`` to the SGE
    execution host list and the SGE ``@allhosts`` group.

    ``inst_alias`` is used only in log statements while the
    ``inst_private_ip`` is the IP address (or hostname) of the given
    instance, which must be visible (i.e., accessible) to the other
    nodes in the clusters.

    :rtype: bool
    :return: ``True`` if all steps completed or were already in place.
    """
    ok = True
    # Check if host is already in the exec host list
    cmd = "export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; %s/bin/lx24-amd64/qconf -sel" \
        % (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root)
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if inst_private_ip in stdout:
        log.debug("Instance '%s' already in SGE execution host list"
                  % inst_alias)
    else:
        log.debug("Adding instance '%s' to SGE execution host list."
                  % inst_alias)
        # Create a dir to hold all of workers host configuration files
        host_conf_dir = "%s/host_confs" % self.app.path_resolver.sge_root
        if not os.path.exists(host_conf_dir):
            subprocess.call('mkdir -p %s' % host_conf_dir, shell=True)
            os.chown(host_conf_dir, pwd.getpwnam("sgeadmin")[2],
                     grp.getgrnam("sgeadmin")[2])
        host_conf_file = os.path.join(host_conf_dir, str(inst_alias))
        with open(host_conf_file, 'w') as f:
            f.write("%s\n" % conf_manager.load_conf_template(
                conf_manager.SGE_HOST_CONF_TEMPLATE).substitute(
                {'hostname': inst_private_ip}))
        os.chown(host_conf_file, pwd.getpwnam("sgeadmin")[2],
                 grp.getgrnam("sgeadmin")[2])
        log.debug("Created SGE host configuration template as file '%s'."
                  % host_conf_file)
        # Add worker instance as execution host to SGE
        cmd = 'export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; %s/bin/lx24-amd64/qconf -Ae %s' \
            % (self.app.path_resolver.sge_root,
               self.app.path_resolver.sge_root, host_conf_file)
        log.debug("Add SGE exec host cmd: {0}".format(cmd))
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # BUGFIX: collect stdout/stderr via communicate() first, then test
        # the exit status. The previous proc.wait() with both streams
        # piped risks a deadlock when the child fills a pipe buffer.
        stdout, stderr = proc.communicate()
        if proc.returncode == 0:
            log.debug(
                "Successfully added instance '%s' w/ private IP '%s' as an execution host."
                % (inst_alias, inst_private_ip))
        else:
            ok = False
            log.error(
                "Process encountered problems adding instance '%s' as an SGE execution host. "
                "Process returned code %s" % (inst_alias, proc.returncode))
            log.debug(
                " - adding instance '%s' SGE execution host stdout (private IP: %s): '%s'"
                % (inst_alias, inst_private_ip, stdout))
            log.debug(
                " - adding instance '%s' SGE execution host stderr (private IP: %s): '%s'"
                % (inst_alias, inst_private_ip, stderr))
    # == Add given instance's hostname to @allhosts
    # Check if instance is already in allhosts file and do not recreate the
    # file if so.
    # Additional documentation: allhosts file can be generated by CloudMan
    # each time an instance is added or removed. The file is generated based
    # on the Instance object CloudMan keeps track of and, as a result, it
    # includes all of the instances listed. So, some instances, although they
    # have yet to go through the addition process, might have had their IPs
    # already included in the allhosts file. This approach ensures consistency
    # between SGE and CloudMan and has been working much better than trying
    # to sync the two via other methods.
    proc = subprocess.Popen(
        "export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; "
        "%s/bin/lx24-amd64/qconf -shgrp @allhosts"
        % (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root),
        shell=True, stdout=subprocess.PIPE)
    allhosts_out = proc.communicate()[0]
    if inst_private_ip not in allhosts_out:
        now = datetime.datetime.utcnow()
        ah_file = '/tmp/ah_add_' + now.strftime("%H_%M_%S")
        self._write_allhosts_file(filename=ah_file, to_add=inst_private_ip)
        if not misc.run(
                'export SGE_ROOT=%s;. $SGE_ROOT/default/common/settings.sh; '
                '%s/bin/lx24-amd64/qconf -Mhgrp %s'
                % (self.app.path_resolver.sge_root,
                   self.app.path_resolver.sge_root, ah_file),
                "Problems updating @allhosts aimed at adding '%s'" % inst_alias,
                "Successfully updated @allhosts to add '%s' with address '%s'"
                % (inst_alias, inst_private_ip)):
            ok = False
    else:
        log.debug("Instance '%s' IP is already in SGE's @allhosts"
                  % inst_alias)
    # On instance reboot, SGE might have already been configured for a given
    # instance and this method will fail along the way although the instance
    # will still operate within SGE so don't explicitly state it was added.
    if ok:
        log.debug("Successfully added instance '%s' to SGE" % inst_alias)
    return ok
def configure_proftpd(self):
    """
    Configure environment for running ProFTPd service.

    Syncs the PostgreSQL ``galaxyftp`` role password with the (rewritten)
    proftpd config, prepares the FTP upload dir, stops vsFTPd if present,
    and starts ProFTPd; returns ``True`` on a successful start.
    """
    log.debug("Configuring ProFTPd")
    # Because we're rewriting the proftpd config file below, update the
    # password for PostgreSQL galaxyftp user
    ftp_pwd = self.app.path_resolver.proftpd_galaxyftp_user_pwd
    log.debug(
        "Setting psql password for galaxyftp role to {0}".format(ftp_pwd))
    # Check whether the galaxyftp role already exists
    exists_cmd = (
        '{0} - postgres -c"{1} -p {2} -tAc\\\"SELECT 1 FROM pg_roles WHERE rolname=\'galaxyftp\'\\\""'
        .format(paths.P_SU, self.app.path_resolver.psql_cmd,
                self.app.path_resolver.psql_db_port))
    action = 'ALTER' if misc.getoutput(exists_cmd) == '1' else 'CREATE'
    # Now either CREATE or ALTER the galaxyftp role to set the permissions
    misc.run(
        '{0} - postgres -c"{1} -p {4} -c\\\"{2} ROLE galaxyftp LOGIN PASSWORD \'{3}\'\\\""'
        .format(paths.P_SU, self.app.path_resolver.psql_cmd, action,
                ftp_pwd, self.app.path_resolver.psql_db_port))
    # Update the config to match the current environment
    rendered = conf_manager.load_conf_template(
        conf_manager.PROFTPD_CONF_TEMPLATE).substitute({
            'galaxy_user_name': paths.GALAXY_USER_NAME,
            'galaxyftp_user_name': 'galaxyftp',
            'psql_galaxyftp_password': ftp_pwd,
            'galaxy_db_port': self.app.path_resolver.psql_db_port,
            'galaxyFS_base_path': self.app.path_resolver.galaxy_data,
            'public_ip_address': self.app.cloud_interface.get_public_ip()
        })
    with open(self.app.path_resolver.proftpd_conf_file, 'w') as conf:
        conf.write("%s\n" % rendered)
    log.debug("Updated ProFTPd conf file {0}".format(
        self.app.path_resolver.proftpd_conf_file))
    # Place the FTP welcome message file
    urllib.urlretrieve(
        "https://s3.amazonaws.com/cloudman/files/proftpd_welcome.txt",
        "/usr/proftpd/etc/welcome_msg.txt")
    # Setup the Galaxy data dir for FTP
    upload_dir = '%s/tmp/ftp' % self.app.path_resolver.galaxy_data
    if not os.path.exists(upload_dir):
        os.makedirs(upload_dir)
    attempt_chown_galaxy(upload_dir)
    # Some images have vsFTPd server included so stop it first
    vsftpd_out = misc.run('status vsftpd', quiet=True)
    if vsftpd_out and 'start/running' in vsftpd_out:
        log.debug("Stopping vsFTPd")
        misc.run('stop vsftpd')
    # Start the server now
    if not misc.run('/etc/init.d/proftpd start'):
        log.error("Trouble starting ProFTPd")
        self.state = service_states.ERROR
        return False
    self.state = service_states.RUNNING
    return True