def _clean_rabbitmq_env(): """ RabbitMQ fails to start if its database is embedded into the image because it saves the current IP address or host name so delete it now. When starting up, RabbitMQ will recreate that directory. """ print "Cleaning RabbitMQ environment" with settings(warn_only=True): sudo('/etc/init.d/rabbitmq-server stop') # If upstart script is used, upstart will restart rabbitmq upon stop sudo('initctl reload-configuration') with settings(warn_only=True): sudo('stop rabbitmq-server') if exists('/var/lib/rabbitmq/mnesia'): sudo('rm -rf /var/lib/rabbitmq/mnesia')
def _clean_galaxy_dir():
    """
    Clean up Galaxy's directory before snapshotting a machine image.

    Removes log and job-runner state, (re)creates the SAMTOOLS index
    symlink, ensures the cloud welcome-screen assets are present, and
    deletes cloud-specific configuration files so fresh copies get pulled
    from the cluster's (or default) bucket at cluster instantiation.
    """
    with settings(warn_only=True):
        print(yellow("Cleaning Galaxy's directory"))
        if exists("%s/paster.log" % GALAXY_HOME):
            sudo("rm %s/paster.log" % GALAXY_HOME)
        sudo("rm %s/database/pbs/*" % GALAXY_HOME)
        # Set up the symlink for SAMTOOLS (remove this code once SAMTOOLS is
        # converted to data tables)
        if exists("%s/tool-data/sam_fa_indices.loc" % GALAXY_HOME):
            sudo("rm %s/tool-data/sam_fa_indices.loc" % GALAXY_HOME)
        tmp_loc = False
        # 'ln -s' needs an existing target, so create a placeholder file if
        # necessary and remove it again once the link has been made.
        if not exists("/mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc"):
            sudo("touch /mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc")
            tmp_loc = True
        sudo("ln -s /mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc %s/tool-data/sam_fa_indices.loc" % GALAXY_HOME)
        if tmp_loc:
            sudo("rm /mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc")
        # If needed, upload the custom cloud welcome screen files
        if not exists("%s/static/images/cloud.gif" % GALAXY_HOME):
            sudo("wget --output-document=%s/static/images/cloud.gif %s/cloud.gif" % (GALAXY_HOME, CDN_ROOT_URL))
        # BUGFIX: the guard used to test 'cloud_txt.png' while the download
        # wrote 'cloud_text.png', so the file was re-fetched on every run.
        if not exists("%s/static/images/cloud_text.png" % GALAXY_HOME):
            sudo("wget --output-document=%s/static/images/cloud_text.png %s/cloud_text.png" % (GALAXY_HOME, CDN_ROOT_URL))
        if not exists("%s/static/welcome.html" % GALAXY_HOME):
            sudo("wget --output-document=%s/static/welcome.html %s/welcome.html" % (GALAXY_HOME, CDN_ROOT_URL))
        # Clean up configuration files from the snapshot to ensure those get
        # downloaded from cluster's (or default) bucket at cluster instantiation
        for conf_file in ["universe_wsgi.ini.cloud", "tool_conf.xml.cloud",
                          "tool_data_table_conf.xml.cloud"]:
            if exists("%s/%s" % (GALAXY_HOME, conf_file)):
                sudo("rm %s/%s" % (GALAXY_HOME, conf_file))
def _cleanup_ec2(env):
    """
    Remove build-time residue so the instance is fit for EC2 AMI creation.
    """
    env.logger.info("Cleaning up for EC2 AMI creation")
    # Shell history, one-shot markers, crash dumps and boot-script logs
    # must not ship inside the image.
    leftovers = [
        ".bash_history",
        "/var/log/firstboot.done",
        ".nx_setup_done",
        "/var/crash/*",
        "%s/ec2autorun.py.log" % env.install_dir,
    ]
    for leftover in leftovers:
        sudo("rm -f %s" % leftover)
    # Stop Apache from starting automatically at boot (it conflicts with Galaxy's nginx)
    sudo('/usr/sbin/update-rc.d -f apache2 remove')
    # RabbitMQ fails to start if its database is embedded into the image
    # because it saves the current IP address or host name so delete it now.
    # When starting up, RabbitMQ will recreate that directory.
    with settings(warn_only=True):
        sudo('/etc/init.d/rabbitmq-server stop')
        sudo('stop rabbitmq-server')
        sudo('/etc/init.d/rabbitmq-server stop')
        sudo('initctl reload-configuration')
    for mnesia_dir in ['/var/lib/rabbitmq/mnesia', '/mnesia']:
        if exists(mnesia_dir):
            sudo('rm -rf %s' % mnesia_dir)
    # remove existing ssh host key pairs; fresh ones get generated at boot
    # http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?AESDG-chapter-sharingamis.htm
    sudo("rm -f /etc/ssh/ssh_host_*")
def provision():
    """Create a new cloud server instance.

    Prompts for image, size and a root password, deploys the node with the
    caller's ssh key plus a user-creation script, waits for it to come up,
    records its hostname in the server config file, and switches the new
    user's shell to zsh. Returns the libcloud node object.
    """
    if "server" not in env:
        util.exit("Please specify a target server")
    conn = cloud_connect()
    # Interactively pick the OS image and instance size from the provider
    image = choose_cloud_option(conn.list_images, IMAGE_RE, "image")
    size = choose_cloud_option(conn.list_sizes, SIZE_RE, "size")
    root_password = getpass.getpass(
        "Choose a root password for the new server: ")
    ssh_key = util.get_ssh_key()
    users = ScriptDeployment(debian.make_user_script(os.environ["USER"],
                                                    ssh_key))
    # a task that first installs the ssh key, and then runs the script
    msd = MultiStepDeployment([SSHKeyDeployment(ssh_key), users])
    out("Creating %s (%s) on %s" % (image.name, size.name,
                                    image.driver.name))
    node = conn.deploy_node(name=env.server["name"], image=image, size=size,
                            deploy=msd)
    out(node)
    # Poll until the provider reports the node as running (state 0);
    # dot() provides console feedback while waiting.
    while get_node(node.uuid).state != 0:
        dot()
    out("Node is up.")
    # Point subsequent Fabric operations at the freshly created host
    env.hosts[0] = env.host_string = node.public_ips[0]
    # Persist the new host's address in the local server config file
    conf = server_conf.read(SERVER_CONF_PATH)
    conf[env.server["label"]]["hostname"] = node.public_ips[0]
    server_conf.write(conf, SERVER_CONF_PATH)
    set_root_password(node.uuid, root_password)
    #Make my shell zsh
    with settings(user="******"):
        packages.apt("zsh")
        login = os.environ["USER"]
        util.script("chsh --shell /bin/zsh " + login, name="Use zshell")
        out("Please set a password for %s on %s" % (login, env.host_string))
        run("passwd " + login)
    return node
def _configure_nfs(env):
    """
    Configure the NFS exports CloudMan expects on a master-capable image.

    Creates the /export/data symlink into the galaxyData filesystem,
    appends the CloudMan mount points to /etc/exports, and links
    /opt/galaxy to the install dir's parent for backward compatibility.
    """
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxyData/export"
    if not exists(nfs_dir):
        # When rerunning this script the symlink may be dangling; exists()
        # treats a dangling link as missing, so clear it before re-linking.
        with settings(warn_only=True):
            sudo('rm -rf {0}'.format(nfs_dir))
        nfs_parent = os.path.dirname(nfs_dir)
        sudo("mkdir -p %s" % nfs_parent)
        sudo("ln -s %s %s" % (cloudman_dir, nfs_dir))
        sudo("chown -R %s %s" % (env.user, nfs_parent))
    # Mount points written to /etc/exports; each is overridable via env
    galaxy_data_mount = env.get("galaxy_data_mount", "/mnt/galaxyData")
    galaxy_indices_mount = env.get("galaxy_indices_mount", "/mnt/galaxyIndices")
    galaxy_tools_mount = env.get("galaxy_tools_mount", "/mnt/galaxyTools")
    exports = [
        '/opt/sge *(rw,sync,no_root_squash,no_subtree_check)',
        '%s *(rw,sync,no_root_squash,subtree_check,no_wdelay)' % galaxy_data_mount,
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % galaxy_indices_mount,
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % galaxy_tools_mount,
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
        '%s/openmpi *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir,
    ]
    append('/etc/exports', exports, use_sudo=True)
    # CloudMan historically expects everything under /opt/galaxy; because
    # stow is used, the equivalent location is the install dir's parent,
    # so symlink it there for backward compatibility.
    legacy_dir = '/opt/galaxy'
    stow_root = os.path.dirname(env.install_dir)
    if not exists(legacy_dir) and exists(stow_root):
        sudo('ln -s {0} {1}'.format(stow_root, legacy_dir))
    env.logger.debug("Done configuring CloudMan NFS")
def install_proftpd(env):
    """Highly configurable GPL-licensed FTP server software.
    http://proftpd.org/

    Builds ProFTPd from source with PostgreSQL-backed SQL auth modules,
    installs it under ``env.install_dir`` via stow, and fetches the init.d
    script, proftpd.conf and welcome message from the repository.
    """
    version = "1.3.4c"
    postgres_ver = "9.1"
    url = "ftp://ftp.tpnet.pl/pub/linux/proftpd/distrib/source/proftpd-%s.tar.gz" % version
    # SQL auth modules required for Galaxy's Postgres-backed FTP users
    modules = "mod_sql:mod_sql_postgres:mod_sql_passwd"
    extra_modules = env.get("extra_proftp_modules", "")  # Comma separated list of extra modules
    if extra_modules:
        modules = "%s:%s" % (modules, extra_modules.replace(",", ":"))
    install_dir = os.path.join(env.install_dir, 'proftpd')
    remote_conf_dir = os.path.join(install_dir, "etc")
    # skip install if already present
    if exists(remote_conf_dir):
        env.logger.debug(
            "ProFTPd seems to already be installed in {0}".format(install_dir))
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            run("wget %s" % url)
            with settings(hide('stdout')):
                run("tar xvzf %s" % os.path.split(url)[1])
            with cd("proftpd-%s" % version):
                run("CFLAGS='-I/usr/include/postgresql' ./configure --prefix=%s " \
                    "--disable-auth-file --disable-ncurses --disable-ident --disable-shadow " \
                    "--enable-openssl --with-modules=%s " \
                    "--with-libraries=/usr/lib/postgresql/%s/lib" % (install_dir, modules, postgres_ver))
                sudo("make")
                sudo("make install")
                sudo("make clean")
    # Get the init.d startup script
    initd_script = 'proftpd.initd'
    initd_url = os.path.join(REPO_ROOT_URL, 'conf_files', initd_script)
    remote_file = "/etc/init.d/proftpd"
    sudo("wget --output-document=%s %s" % (remote_file, initd_url))
    # Point the downloaded script at this particular install location
    sed(remote_file, 'REPLACE_THIS_WITH_CUSTOM_INSTALL_DIR', install_dir,
        use_sudo=True)
    sudo("chmod 755 %s" % remote_file)
    # Set the configuration file
    conf_file = 'proftpd.conf'
    conf_url = os.path.join(REPO_ROOT_URL, 'conf_files', conf_file)
    remote_file = os.path.join(remote_conf_dir, conf_file)
    sudo("wget --output-document=%s %s" % (remote_file, conf_url))
    sed(remote_file, 'REPLACE_THIS_WITH_CUSTOM_INSTALL_DIR', install_dir,
        use_sudo=True)
    # Get the custom welcome msg file
    welcome_msg_file = 'welcome_msg.txt'
    welcome_url = os.path.join(REPO_ROOT_URL, 'conf_files', welcome_msg_file)
    sudo("wget --output-document=%s %s" %
         (os.path.join(remote_conf_dir, welcome_msg_file), welcome_url))
    # Stow
    sudo("cd %s; stow proftpd" % env.install_dir)
    env.logger.debug("----- ProFTPd %s installed to %s -----" % (version, install_dir))
def install(self, local_env={}): """ If not already installed, install given tool and all of its dependencies. """ if self.set_env(local_env): if not self.is_installed(local_env=local_env): print "Trying to install %s %s as user %s" % (self.tool_env['pkg_name'], self.tool_env['version'], env.user) packages = ['gcc', 'g++', 'octave3.0-headers', 'python-dev', 'python-numpy', 'liblapack-dev', 'libatlas3gf-base', 'python-numpy-ext', 'python-matplotlib', 'swig'] common.install_required_packages(packages) install_cmd = sudo if self.tool_env['use_sudo'] else run with common.make_tmp_dir(self.tool_env) as work_dir: with nested(cd(work_dir), settings(hide('stdout'))): install_cmd("wget %s" % self.tool_env['url']) install_cmd("tar xvjf %s" % os.path.split(self.tool_env['url'])[1]) with cd("shogun-%s/src" % self.tool_env['version']): install_cmd("./configure --prefix=%s --interfaces=libshogun,libshogunui,python,python_modular,octave" % self.tool_env['install_dir']) print "Making Shogun..." install_cmd("make") install_cmd("make install") install_cmd("echo 'export LD_LIBRARY_PATH=%s/lib:$LD_LIBRARY_PATH' > %s/env.sh" % (self.tool_env['install_dir'], self.tool_env['install_dir'])) install_cmd("cd %s/lib; ln -s python* python" % self.tool_env['install_dir']) install_cmd("echo 'export PYTHONPATH=%s/lib/python/dist-packages:$PYTHONPATH' >> %s/env.sh" % (self.tool_env['install_dir'], self.tool_env['install_dir'])) install_cmd("chmod +x %s/env.sh" % self.tool_env['install_dir']) install_dir_root = os.path.join(self.tool_env['install_dir_root'], self.tool_env['pkg_name']) install_cmd('if [ ! -d %s/default ]; then ln -s %s %s/default; fi' % (install_dir_root, self.tool_env['install_dir'], install_dir_root)) install_cmd('chown -R %s:%s %s' % (env.user, env.user, install_dir_root)) if self.tool_env['installed'] or self.is_installed(): return common.compose_successful_return(self.tool_env) print "----- Problem installing Shogun -----" return self.tool_env
def _install_postgresql(): version = "8.4.4" url = "http://wwwmaster.postgresql.org/redir/198/h/source/v%s/postgresql-%s.tar.gz" % (version, version) install_dir = os.path.join(env.install_dir, "postgresql") with _make_tmp_dir() as work_dir: with contextlib.nested(cd(work_dir), settings(hide('stdout'))): run("wget %s" % url) run("tar xvzf %s" % os.path.split(url)[1]) with cd("postgresql-%s" % version): run("./configure --prefix=%s" % install_dir) with settings(hide('stdout')): print "Making PostgreSQL..." run("make") sudo("make install") sudo("cd %s; stow postgresql" % env.install_dir) print(green("----- PostgreSQL installed -----"))
def install(self, local_env={}, force=False): """ If not already installed, install given tool and all of its dependencies. """ if self.set_env(local_env): if (not self.is_installed(local_env=local_env) and not self.tool_env['fatal_error']) or force: if not self.tool_env['dependencies_ok']: if not self.resolve_dependencies(): print "----- ERROR resolving dependencies -----" return False # Maybe the dependencies is all that was missing so check if # the tool can be considered as installed now if not self.is_installed(local_env=local_env) or force: print "Trying to install %s %s as user %s" % (self.tool_env['pkg_name'], self.tool_env['version'], env.user) # TODO: Get complete file name list files = ['datagen.py'] install_cmd = sudo if self.tool_env['use_sudo'] else run if not exists(self.tool_env['install_dir']): install_cmd("mkdir -p %s" % self.tool_env['install_dir']) install_cmd("chown %s %s" % (self.tool_env['user'], self.tool_env['install_dir'])) with nested(cd(self.tool_env['install_dir']), settings(hide('stdout'))): for f in files: install_cmd("wget --no-check-certificate %s" % (self.tool_env['url']+f)) if self.tool_env.has_key('shogun_env_script') and exists(self.tool_env['shogun_env_script']): install_cmd("echo '. %s' > %s/env.sh" % (self.tool_env['shogun_env_script'], self.tool_env['install_dir'])) install_cmd("chmod +x %s/env.sh" % self.tool_env['install_dir']) install_cmd('chown -R %s %s' % (env.user, self.tool_env['install_dir'])) else: print "ERROR: Required dependency file for not found (for Shogun)." self.tool_env['fatal_error'] = True # Make sure the tool installed correctly if not self.tool_env['fatal_error'] and (self.tool_env['installed'] or self.is_installed()): return common.compose_successful_return(self.tool_env) print "----- Problem installing EasySVM -----" return self.tool_env
def _configure_nfs(env):
    """
    Configure NFS exports for CloudMan's shared filesystems.

    Creates the /export/data symlink into galaxyData, appends the fixed
    CloudMan mount points to /etc/exports, and adds the backward-compatible
    /opt/galaxy symlink.
    """
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxyData/export"
    if not exists(nfs_dir):
        # A rerun can leave a dangling symlink behind; exists() reports it
        # as missing, so remove whatever is there before linking again.
        with settings(warn_only=True):
            sudo('rm -rf {0}'.format(nfs_dir))
        nfs_parent = os.path.dirname(nfs_dir)
        sudo("mkdir -p %s" % nfs_parent)
        sudo("ln -s %s %s" % (cloudman_dir, nfs_dir))
        sudo("chown -R %s %s" % (env.user, nfs_parent))
    # Paths published via /etc/exports, to be used as NFS mount points
    exports = [
        '/opt/sge *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyData *(rw,sync,no_root_squash,subtree_check,no_wdelay)',
        '/mnt/galaxyIndices *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyTools *(rw,sync,no_root_squash,no_subtree_check)',
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
        '%s/openmpi *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir,
    ]
    append('/etc/exports', exports, use_sudo=True)
    # CloudMan expects its files under /opt/galaxy; because stow is used,
    # the equivalent location is the install dir's parent, so link it.
    legacy_dir = '/opt/galaxy'
    stow_root = os.path.dirname(env.install_dir)
    if not exists(legacy_dir) and exists(stow_root):
        sudo('ln -s {0} {1}'.format(stow_root, legacy_dir))
    env.logger.debug("Done configuring CloudMan NFS")
def install_proftpd(env):
    """Build and install ProFTPd 1.3.3d from source with PostgreSQL-backed
    SQL auth modules, stow it into ``env.install_dir``, and fetch the
    init.d script plus configuration files from the repository.
    """
    version = "1.3.3d"
    postgres_ver = "8.4"
    url = "ftp://mirrors.ibiblio.org/proftpd/distrib/source/proftpd-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, 'proftpd')
    remote_conf_dir = os.path.join(install_dir, "etc")
    # skip install if already present
    if exists(remote_conf_dir):
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            run("wget %s" % url)
            with settings(hide('stdout')):
                run("tar xvzf %s" % os.path.split(url)[1])
            with cd("proftpd-%s" % version):
                # SQL/Postgres modules enable Galaxy's DB-backed FTP users
                run("CFLAGS='-I/usr/include/postgresql' ./configure --prefix=%s --disable-auth-file --disable-ncurses --disable-ident --disable-shadow --enable-openssl --with-modules=mod_sql:mod_sql_postgres:mod_sql_passwd --with-libraries=/usr/lib/postgres/%s/lib" % (install_dir, postgres_ver))
                sudo("make")
                sudo("make install")
                sudo("make clean")
    # Get init.d startup script
    initd_script = 'proftpd'
    initd_url = os.path.join(REPO_ROOT_URL, 'conf_files', initd_script)
    sudo("wget --output-document=%s %s" % (os.path.join('/etc/init.d', initd_script), initd_url))
    sudo("chmod 755 %s" % os.path.join('/etc/init.d', initd_script))
    # Get configuration files
    proftpd_conf_file = 'proftpd.conf'
    welcome_msg_file = 'welcome_msg.txt'
    conf_url = os.path.join(REPO_ROOT_URL, 'conf_files', proftpd_conf_file)
    welcome_url = os.path.join(REPO_ROOT_URL, 'conf_files', welcome_msg_file)
    sudo("wget --output-document=%s %s" % (os.path.join(remote_conf_dir, proftpd_conf_file), conf_url))
    sudo("wget --output-document=%s %s" % (os.path.join(remote_conf_dir, welcome_msg_file), welcome_url))
    # Stow makes the install visible under env.install_dir
    sudo("cd %s; stow proftpd" % env.install_dir)
def _cleanup_ec2(env):
    """Clean up any extra files after building.

    Removes logs, runtime directories, ssh host keys and RabbitMQ state so
    the instance can be safely captured as an EC2 AMI.
    """
    env.logger.info("Cleaning up for EC2 AMI creation")
    # Shell history, one-shot markers and boot-script logs must not ship
    # inside the image
    fnames = [".bash_history", "/var/log/firstboot.done", ".nx_setup_done",
              "/var/crash/*", "%s/ec2autorun.py.log" % env.install_dir,
              "%s/ec2autorun.err" % env.install_dir,
              "%s/ec2autorun.log" % env.install_dir]
    for fname in fnames:
        sudo("rm -f %s" % fname)
    # Runtime/user-data directories recreated at cluster start
    rmdirs = ["/mnt/galaxyData", "/mnt/cm", "/tmp/cm"]
    for rmdir in rmdirs:
        sudo("rm -rf %s" % rmdir)
    # Stop Apache from starting automatically at boot (it conflicts with Galaxy's nginx)
    sudo('/usr/sbin/update-rc.d -f apache2 remove')
    # RabbitMQ fails to start if its database is embedded into the image
    # because it saves the current IP address or host name so delete it now.
    # When starting up, RabbitMQ will recreate that directory.
    with settings(warn_only=True):
        sudo('/etc/init.d/rabbitmq-server stop')
        # Try both the init.d and the upstart invocation; whichever is in
        # use will succeed, failures are tolerated via warn_only
        sudo('stop rabbitmq-server')
        sudo('/etc/init.d/rabbitmq-server stop')
        sudo('initctl reload-configuration')
    for db_location in ['/var/lib/rabbitmq/mnesia', '/mnesia']:
        if exists(db_location):
            sudo('rm -rf %s' % db_location)
    # remove existing ssh host key pairs
    # http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?AESDG-chapter-sharingamis.htm
    sudo("rm -f /etc/ssh/ssh_host_*")
def start_services(site_name):
    """Reload nginx and start the site's gunicorn upstart job.

    The gunicorn start is best-effort: if the job is already running the
    command fails, which is tolerated (warn_only) and its warning hidden.
    """
    run('sudo service nginx reload')
    gunicorn_cmd = 'sudo start gunicorn-%s' % site_name
    with settings(hide('warnings'), warn_only=True):
        run(gunicorn_cmd)
def decorator(*args, **kwargs):
    """Probe for `pname` on the remote host and invoke the wrapped
    function only when the probe exits with status 0 or 1; returns the
    wrapped function's result (or None when the probe fails otherwise).
    """
    quiet = hide('warnings', 'running', 'stdout', 'stderr')
    with settings(quiet, warn_only=True):
        probe = run(pname)
    if probe.return_code in (0, 1):
        return func(*args, **kwargs)
def _create_virtual_host(source_folder, site_name):
    """Render the nginx site config from the deploy template and enable it
    by symlinking it into sites-enabled."""
    render_cmd = ('''cd %s && sed "s/SITENAME/%s/g" deploy_tools/nginx.template.conf | sudo tee /etc/nginx/sites-available/%s''' % (source_folder, site_name, site_name))
    run(render_cmd)
    # The symlink may already exist from a previous deploy; tolerate that
    # instead of aborting, and keep the warning out of the output.
    enable_cmd = ('''sudo ln -s /etc/nginx/sites-available/%s /etc/nginx/sites-enabled/%s''' % (site_name, site_name))
    with settings(hide('warnings'), warn_only=True):
        run(enable_cmd)
def _update_system():
    """Runs standard system update"""
    _setup_sources()
    with settings(warn_only=True):
        # Some custom CBL sources don't always work so avoid a crash in that case
        sudo('apt-get -y update')
    # Ensure a completely noninteractive upgrade
    run('export DEBIAN_FRONTEND=noninteractive; sudo -E apt-get upgrade -y --force-yes')
    sudo('apt-get -y dist-upgrade')
    print(yellow("Done updating the system"))
def cmd_success(tool_env, cmd):
    """Run the given command (via sudo when the tool environment requests
    it) and report whether it exited with status 0."""
    runner = sudo if tool_env['use_sudo'] else run
    with settings(warn_only=True):
        outcome = runner(cmd)
    # Direct boolean: exit status 0 means success
    return outcome.return_code == 0
def _install_nginx():
    """Build nginx with the upload module from source, stow it into the
    install dir, and fetch CloudMan's nginx.conf plus custom error pages.
    """
    version = "1.2.0"
    upload_module_version = "2.2.0"
    upload_url = "http://www.grid.net.ru/nginx/download/" \
                 "nginx_upload_module-%s.tar.gz" % upload_module_version
    url = "http://nginx.org/download/nginx-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, "nginx")
    remote_conf_dir = os.path.join(install_dir, "conf")
    # skip install if already present (the "/cloud" marker in nginx.conf
    # indicates our customized configuration is in place)
    if exists(remote_conf_dir) and contains(os.path.join(remote_conf_dir, "nginx.conf"), "/cloud"):
        return
    with _make_tmp_dir() as work_dir:
        with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
            # Unpack the upload module next to the nginx source so the
            # relative --add-module path below resolves
            run("wget %s" % upload_url)
            run("tar -xvzpf %s" % os.path.split(upload_url)[1])
            run("wget %s" % url)
            run("tar xvzf %s" % os.path.split(url)[1])
            with cd("nginx-%s" % version):
                run("./configure --prefix=%s --with-ipv6 --add-module=../nginx_upload_module-%s "
                    "--user=galaxy --group=galaxy --with-http_ssl_module --with-http_gzip_static_module "
                    "--with-cc-opt=-Wno-error --with-debug" % (install_dir, upload_module_version))
                run("make")
                sudo("make install")
                with settings(warn_only=True):
                    # Stow makes the install visible under env.install_dir
                    sudo("cd %s; stow nginx" % env.install_dir)
    # Fetch the CloudMan-customized configuration file
    nginx_conf_file = 'nginx.conf'
    url = os.path.join(REPO_ROOT_URL, nginx_conf_file)
    with cd(remote_conf_dir):
        sudo("wget --output-document=%s/%s %s" % (remote_conf_dir, nginx_conf_file, url))
    # Fetch and unpack the custom error documents
    nginx_errdoc_file = 'nginx_errdoc.tar.gz'
    url = os.path.join(REPO_ROOT_URL, nginx_errdoc_file)
    remote_errdoc_dir = os.path.join(install_dir, "html")
    with cd(remote_errdoc_dir):
        sudo("wget --output-document=%s/%s %s" % (remote_errdoc_dir, nginx_errdoc_file, url))
        sudo('tar xvzf %s' % nginx_errdoc_file)
    # CloudMan looks for the nginx binary at this fixed path
    cloudman_default_dir = "/opt/cloudman/sbin"
    sudo("mkdir -p %s" % cloudman_default_dir)
    if not exists("%s/nginx" % cloudman_default_dir):
        sudo("ln -s %s/sbin/nginx %s/nginx" % (install_dir, cloudman_default_dir))
    print(green("----- nginx installed and configured -----"))
def _install_boto():
    """Clone boto from GitHub into the install dir, install it into the
    system Python, and report the installed version."""
    boto_dir = env.install_dir + "/boto"
    with contextlib.nested(cd(env.install_dir), settings(hide('stdout'))):
        sudo("git clone http://github.com/boto/boto.git")
        with cd(boto_dir):
            sudo("python setup.py install")
    # Confirm the install by importing boto and reading its version
    version = run('python -c"import boto; print boto.__version__"')
    print(green("----- boto %s installed -----" % version))
def decorator(*args, **kwargs):
    """Run the wrapped installer only when probing for `pname` shows it is
    missing: a shell exit status of 127 means 'command not found'."""
    with settings(warn_only=True):
        probe = run(pname)
    if probe.return_code == 127:
        print(yellow("'%s' not installed; return code: '%s'" % (pname, probe.return_code)))
        return func(*args, **kwargs)
    print(green("'%s' is already installed" % pname))
def install_proftpd(env):
    """Highly configurable GPL-licensed FTP server software.
    http://proftpd.org/

    Builds ProFTPd from source with PostgreSQL-backed SQL auth modules
    (plus any ``extra_proftp_modules`` from env), installs it via stow,
    and renders the configuration from a template.
    """
    version = "1.3.4c"
    postgres_ver = "9.1"
    url = "ftp://ftp.tpnet.pl/pub/linux/proftpd/distrib/source/proftpd-%s.tar.gz" % version
    modules = "mod_sql:mod_sql_postgres:mod_sql_passwd"
    extra_modules = env.get("extra_proftp_modules", "")  # Comma separated list of extra modules
    if extra_modules:
        modules = "%s:%s" % (modules, extra_modules.replace(",", ":"))
    install_dir = os.path.join(env.install_dir, 'proftpd')
    remote_conf_dir = os.path.join(install_dir, "etc")
    # Skip install if already available
    if env.safe_exists(remote_conf_dir):
        env.logger.debug("ProFTPd seems to already be installed in {0}".format(install_dir))
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            env.safe_run("wget %s" % url)
            with settings(hide('stdout')):
                env.safe_run("tar xvzf %s" % os.path.split(url)[1])
            with cd("proftpd-%s" % version):
                env.safe_run("CFLAGS='-I/usr/include/postgresql' ./configure --prefix=%s "
                             "--disable-auth-file --disable-ncurses --disable-ident --disable-shadow "
                             "--enable-openssl --with-modules=%s "
                             "--with-libraries=/usr/lib/postgresql/%s/lib" % (install_dir, modules, postgres_ver))
                env.safe_sudo("make")
                env.safe_sudo("make install")
                env.safe_sudo("make clean")
    # Get the init.d startup script
    initd_script = 'proftpd.initd'
    initd_url = os.path.join(REPO_ROOT_URL, 'conf_files', initd_script)
    remote_file = "/etc/init.d/proftpd"
    env.safe_sudo("wget --output-document=%s %s" % (remote_file, initd_url))
    # Point the downloaded script at this particular install location
    env.safe_sed(remote_file, 'REPLACE_THIS_WITH_CUSTOM_INSTALL_DIR', install_dir, use_sudo=True)
    env.safe_sudo("chmod 755 %s" % remote_file)
    # Set the configuration file
    conf_file = 'proftpd.conf'
    remote_file = os.path.join(remote_conf_dir, conf_file)
    # Defaults used when the values are not already present in env
    if "postgres_port" not in env:
        env.postgres_port = '5910'
    if "galaxy_ftp_user_password" not in env:
        env.galaxy_ftp_user_password = '******'
    proftpd_conf = {'galaxy_uid': env.safe_run('id -u galaxy'),
                    'galaxy_fs': '/mnt/galaxy',  # Should be a var but uncertain how to get it
                    'install_dir': install_dir}
    _setup_conf_file(env, remote_file, conf_file, overrides=proftpd_conf,
                     default_source="proftpd.conf.template")
    # Get the custom welcome msg file
    welcome_msg_file = 'welcome_msg.txt'
    welcome_url = os.path.join(REPO_ROOT_URL, 'conf_files', welcome_msg_file)
    env.safe_sudo("wget --output-document=%s %s" %
                  (os.path.join(remote_conf_dir, welcome_msg_file), welcome_url))
    # Stow
    env.safe_sudo("cd %s; stow proftpd" % env.install_dir)
    env.logger.debug("----- ProFTPd %s installed to %s -----" % (version, install_dir))
def install(self, local_env={}, force=False): """ If not already installed, install given tool and all of its dependencies. """ if self.set_env(local_env): if (not self.is_installed(local_env=local_env) and not self.tool_env['fatal_error']) or force: if not self.tool_env['dependencies_ok']: if not self.resolve_dependencies(): print "----- ERROR resolving dependencies -----" return False # Maybe the dependencies is all that was missing so check if # the tool can be considered as installed now if not self.is_installed(local_env=local_env) or force: print "Trying to install %s %s as user %s" % (str(self.tool_env['pkg_name']).upper(), self.tool_env['version'], env.user) install_cmd = sudo if self.tool_env['use_sudo'] else run with nested(cd(self.tool_env['install_dir']), settings(hide('stdout'))): install_cmd("wget %s" % self.tool_env['url']) install_cmd("wget %s" % self.tool_env['url2']) install_cmd("tar xfvj %s; rm %s" % (os.path.split(self.tool_env['url'])[1], os.path.split(self.tool_env['url'])[1])) install_cmd("tar xfvz %s; rm %s" % (os.path.split(self.tool_env['url2'])[1], os.path.split(self.tool_env['url2'])[1])) install_cmd("mv asp-%s/* .; rm -rf asp-%s" % (self.tool_env['version'], self.tool_env['version'])) # find a way to provide xmls if not exists('%s/tools/signals' % self.tool_env['galaxy_dir']): install_cmd('mkdir %s/tools/signals' % self.tool_env['galaxy_dir']) with settings(warn_only=True): install_cmd("sed -i '1i if [ -n \"${PACKAGE_BASE+x}\" ]; then cd %s; fi' %s.sh" % (self.tool_env['install_dir'], self.tool_env['pkg_name'])) install_cmd('mv %s.xml %s.sh %s/tools/signals/' % (self.tool_env['pkg_name'], self.tool_env['pkg_name'], self.tool_env['galaxy_dir'])) install_cmd("chmod +x %s/tools/signals/%s.sh" % (self.tool_env['galaxy_dir'], self.tool_env['pkg_name'])) install_cmd('chown -R %s %s/tools/signals' % (env.user, self.tool_env['galaxy_dir'])) if self.tool_env.has_key('shogun_env_script') and exists(self.tool_env['shogun_env_script']): 
install_cmd("echo '. %s' > %s/env.sh" % (self.tool_env['shogun_env_script'], self.tool_env['install_dir'])) install_cmd("echo 'export PATH=%s:$PATH' >> %s/env.sh" % (self.tool_env['install_dir'], self.tool_env['install_dir'])) install_cmd("echo 'export PYTHONPATH=%s:$PYTHONPATH' >> %s/env.sh" % (self.tool_env['install_dir'], self.tool_env['install_dir'])) install_cmd("chmod +x %s/env.sh" % self.tool_env['install_dir']) install_dir_root = os.path.join(self.tool_env['install_dir_root'], self.tool_env['pkg_name']) install_cmd('if [ ! -d %s/default ]; then ln -s %s %s/default; fi' % (install_dir_root, self.tool_env['install_dir'], install_dir_root)) install_cmd('chown -R %s %s' % (env.user, install_dir_root)) else: print "ERROR: Required dependency file for not found (for Shogun)." self.tool_env['fatal_error'] = True # Make sure the tool installed correctly if not self.tool_env['fatal_error'] and (self.tool_env['installed'] or self.is_installed()): return common.compose_successful_return(self.tool_env) print "----- Problem installing ASP -----" return self.tool_env
def install_sge(env):
    """Download the SGE 6.2u5 binary tarball from the CDN and unpack it
    into the install dir; skipped when the unpacked tree already exists."""
    out_dir = "ge6.2u5"
    url = "%s/ge62u5_lx24-amd64.tar.gz" % CDN_ROOT_URL
    install_dir = env.install_dir
    # Already unpacked from a previous run - nothing to do
    if exists(os.path.join(install_dir, out_dir)):
        return
    with _make_tmp_dir() as work_dir:
        with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
            run("wget %s" % url)
            # Make the install dir writable by the deploy user before
            # extracting straight into it
            sudo("chown %s %s" % (env.user, install_dir))
            tarball = os.path.split(url)[1]
            run("tar -C %s -xvzf %s" % (install_dir, tarball))
def install_nginx(env):
    """Nginx open source web server.
    http://www.nginx.org/

    Builds nginx from source with the modules returned by
    ``_get_nginx_modules``, stows it into ``env.install_dir``, fetches the
    CloudMan configuration and error documents, and symlinks the binary
    where CloudMan expects to find it.
    """
    version = "1.2.0"
    url = "http://nginx.org/download/nginx-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, "nginx")
    remote_conf_dir = os.path.join(install_dir, "conf")
    # Skip install if already present (the "/cloud" marker in nginx.conf
    # indicates our customized configuration is in place)
    if exists(remote_conf_dir) and contains(os.path.join(remote_conf_dir, "nginx.conf"), "/cloud"):
        env.logger.debug("Nginx already installed; not installing it again.")
        return
    with _make_tmp_dir() as work_dir:
        with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
            # Modules are made available in the work dir so the relative
            # --add-module paths below resolve
            modules = _get_nginx_modules(env)
            module_flags = " ".join(["--add-module=../%s" % x for x in modules])
            run("wget %s" % url)
            run("tar xvzf %s" % os.path.split(url)[1])
            with cd("nginx-%s" % version):
                run("./configure --prefix=%s --with-ipv6 %s "
                    "--user=galaxy --group=galaxy --with-debug "
                    "--with-http_ssl_module --with-http_gzip_static_module" %
                    (install_dir, module_flags))
                # Strip -Werror so warnings in bundled modules don't break
                # the build
                sed("objs/Makefile", "-Werror", "")
                run("make")
                sudo("make install")
                sudo("cd %s; stow nginx" % env.install_dir)
    # Fetch the CloudMan-customized configuration file
    nginx_conf_file = 'nginx.conf'
    url = os.path.join(REPO_ROOT_URL, nginx_conf_file)
    with cd(remote_conf_dir):
        sudo("wget --output-document=%s/%s %s" % (remote_conf_dir, nginx_conf_file, url))
    # Fetch and unpack the custom error documents
    nginx_errdoc_file = 'nginx_errdoc.tar.gz'
    url = os.path.join(REPO_ROOT_URL, nginx_errdoc_file)
    remote_errdoc_dir = os.path.join(install_dir, "html")
    with cd(remote_errdoc_dir):
        sudo("wget --output-document=%s/%s %s" % (remote_errdoc_dir, nginx_errdoc_file, url))
        sudo('tar xvzf %s' % nginx_errdoc_file)
    sudo("mkdir -p %s" % env.install_dir)
    if not exists("%s/nginx" % env.install_dir):
        sudo("ln -s %s/sbin/nginx %s/nginx" % (install_dir, env.install_dir))
    # If the guessed symlinking did not work, force it now
    cloudman_default_dir = "/opt/galaxy/sbin"
    if not exists(cloudman_default_dir):
        sudo("mkdir -p %s" % cloudman_default_dir)
    if not exists(os.path.join(cloudman_default_dir, "nginx")):
        sudo("ln -s %s/sbin/nginx %s/nginx" % (install_dir, cloudman_default_dir))
    env.logger.debug("Nginx {0} installed to {1}".format(version, install_dir))
def _do_cleanup():
    """Tear down the EC2 resources used for the build.

    Unmounts the data volume, and if the volume reference checks out:
    detaches it, terminates the instance, snapshots the volume, deletes it
    and removes the local configuration file. Otherwise the volume is
    remounted so work can continue.
    """
    with v_settings(host_string=env.hosts[0]):
        ec2_conn = _get_conn()
        # The unmount may fail if nothing is mounted; tolerate that
        with settings(warn_only=True):
            sudo("umount %s" % DEST_DATA_DIR)
        if _get_volume_ref(env.instance, ec2_conn):
            if _detach_volume(ec2_conn, env.volume.id, env.instance.id):
                _terminate_instance(env.instance, ec2_conn)
                # Only discard the volume once a snapshot of it exists
                if _create_snap(ec2_conn, env.volume.id):
                    _delete_volume(ec2_conn, env.volume.id)
                    os.remove(C_FILE)  # Delete configuration file
        else:
            # Volume reference did not check out - put the mount back
            sudo("mount %s %s" % (env.vol_device, DEST_DATA_DIR))
def _cleanup_ec2(env):
    """
    Clean up any extra files after building.

    This method must be called on an instance after being built and before
    creating a new machine image. *Note* that after this method has run,
    key-based ssh access to the machine is no longer possible.
    """
    env.logger.info("Cleaning up for EC2 AMI creation")
    # Clean up log files and such
    fnames = [".bash_history", "/var/log/firstboot.done", ".nx_setup_done",
              "/var/crash/*", "%s/ec2autorun.py.log" % env.install_dir,
              "%s/ec2autorun.err" % env.install_dir,
              "%s/ec2autorun.log" % env.install_dir,
              "%s/bin/ec2autorun.log" % env.install_dir]
    for fname in fnames:
        sudo("rm -f %s" % fname)
    # Runtime/user-data directories recreated at cluster start
    rmdirs = ["/mnt/galaxyData", "/mnt/cm", "/tmp/cm"]
    for rmdir in rmdirs:
        sudo("rm -rf %s" % rmdir)
    # Seed the history with frequently used commands
    env.logger.debug("Setting bash history")
    local = os.path.join(env.config_dir, os.pardir, "installed_files",
                         "bash_history")
    remote = os.path.join('/home', 'ubuntu', '.bash_history')
    put(local, remote, mode=0660, use_sudo=True)
    # Make sure the default config dir is owned by ubuntu
    sudo("chown ubuntu:ubuntu ~/.config")
    # Stop Apache from starting automatically at boot (it conflicts with Galaxy's nginx)
    sudo('/usr/sbin/update-rc.d -f apache2 remove')
    with settings(warn_only=True):
        # RabbitMQ fails to start if its database is embedded into the image
        # because it saves the current IP address or host name so delete it now.
        # When starting up, RabbitMQ will recreate that directory.
        sudo('/etc/init.d/rabbitmq-server stop')
        sudo('service rabbitmq-server stop')
        # Clean up packages that are causing issues or are unnecessary
        pkgs_to_remove = ['tntnet', 'tntnet-runtime', 'libtntnet9', 'vsftpd']
        for ptr in pkgs_to_remove:
            sudo('apt-get -y --force-yes remove --purge {0}'.format(ptr))
        sudo('initctl reload-configuration')
    for db_location in ['/var/lib/rabbitmq/mnesia', '/mnesia']:
        if exists(db_location):
            sudo('rm -rf %s' % db_location)
    # remove existing ssh host key pairs
    # http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/AESDG-chapter-sharingamis.html
    sudo("rm -f /etc/ssh/ssh_host_*")
    # Removing authorized keys cuts off key-based ssh access from here on
    sudo("rm -f ~/.ssh/authorized_keys*")
    sudo("rm -f /root/.ssh/authorized_keys*")
def install_nginx(env):
    """Nginx open source web server.
    http://www.nginx.org/

    Builds nginx %(version)s from source with the CloudMan module set,
    installs it under ``env.install_dir`` via stow, deploys the nginx.conf
    template and custom error documents, and symlinks the binary into the
    locations CloudMan expects.
    """
    version = "1.2.0"
    url = "http://nginx.org/download/nginx-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, "nginx")
    remote_conf_dir = os.path.join(install_dir, "conf")
    # Skip install if already present: a conf dir whose nginx.conf mentions
    # "/cloud" is taken as evidence of a prior CloudMan-flavored install.
    if exists(remote_conf_dir) and contains(os.path.join(remote_conf_dir, "nginx.conf"),
                                            "/cloud"):
        env.logger.debug("Nginx already installed; not installing it again.")
        return
    with _make_tmp_dir() as work_dir:
        with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
            # Extra nginx modules (e.g. upload module) fetched by the helper;
            # each is referenced relative to the build dir.
            modules = _get_nginx_modules(env)
            module_flags = " ".join(["--add-module=../%s" % x for x in modules])
            run("wget %s" % url)
            run("tar xvzf %s" % os.path.split(url)[1])
            with cd("nginx-%s" % version):
                run("./configure --prefix=%s --with-ipv6 %s "
                    "--user=galaxy --group=galaxy --with-debug "
                    "--with-http_ssl_module --with-http_gzip_static_module" %
                    (install_dir, module_flags))
                # Third-party modules trip -Werror; strip it from the Makefile
                sed("objs/Makefile", "-Werror", "")
                run("make")
                sudo("make install")
                # stow symlinks the versioned install into env.install_dir
                sudo("cd %s; stow nginx" % env.install_dir)
    defaults = {"galaxy_home": "/mnt/galaxyTools/galaxy-central"}
    _setup_conf_file(env, os.path.join(remote_conf_dir, "nginx.conf"),
                     "nginx.conf", defaults=defaults)
    # Deploy custom error pages served by nginx
    nginx_errdoc_file = 'nginx_errdoc.tar.gz'
    url = os.path.join(REPO_ROOT_URL, nginx_errdoc_file)
    remote_errdoc_dir = os.path.join(install_dir, "html")
    with cd(remote_errdoc_dir):
        sudo("wget --output-document=%s/%s %s" %
             (remote_errdoc_dir, nginx_errdoc_file, url))
        sudo('tar xvzf %s' % nginx_errdoc_file)
    # Symlink the binary where CloudMan looks for it
    sudo("mkdir -p %s" % env.install_dir)
    if not exists("%s/nginx" % env.install_dir):
        sudo("ln -s %s/sbin/nginx %s/nginx" % (install_dir, env.install_dir))
    # If the guessed symlinking did not work, force it now
    cloudman_default_dir = "/opt/galaxy/sbin"
    if not exists(cloudman_default_dir):
        sudo("mkdir -p %s" % cloudman_default_dir)
    if not exists(os.path.join(cloudman_default_dir, "nginx")):
        sudo("ln -s %s/sbin/nginx %s/nginx" % (install_dir, cloudman_default_dir))
    env.logger.debug("Nginx {0} installed to {1}".format(version, install_dir))
def install_sge(env):
    """Sun Grid Engine.

    Fetch the SGE 6.2u5 tarball from the CDN and unpack it into
    ``env.install_dir``; a no-op if the unpacked directory already exists.
    """
    out_dir = "ge6.2u5"
    url = "%s/ge62u5_lx24-amd64.tar.gz" % CDN_ROOT_URL
    install_dir = env.install_dir
    # Already extracted from a previous run -- nothing to do.
    if env.safe_exists(os.path.join(install_dir, out_dir)):
        return
    with _make_tmp_dir() as work_dir:
        with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
            env.safe_run("wget %s" % url)
            # Target must be writable by the remote user before extraction
            env.safe_sudo("chown %s %s" % (env.user, install_dir))
            tarball = os.path.split(url)[1]
            env.safe_run("tar -C %s -xvzf %s" % (install_dir, tarball))
    env.logger.debug("SGE setup")
def install_proftpd(env):
    """Highly configurable GPL-licensed FTP server software.
    http://proftpd.org/

    Builds ProFTPd from source with PostgreSQL-backed SQL auth modules,
    installs it under ``env.install_dir`` via stow, and deploys the init.d
    script, proftpd.conf and welcome message from the repo.
    """
    version = "1.3.4c"
    postgres_ver = "9.1"
    url = "ftp://ftp.tpnet.pl/pub/linux/proftpd/distrib/source/proftpd-%s.tar.gz" % version
    # SQL auth against PostgreSQL; extra modules come from fabricrc as a
    # comma-separated list and are converted to proftpd's colon syntax.
    modules = "mod_sql:mod_sql_postgres:mod_sql_passwd"
    extra_modules = env.get("extra_proftp_modules", "")  # Comma separated list of extra modules
    if extra_modules:
        modules = "%s:%s" % (modules, extra_modules.replace(",", ":"))
    install_dir = os.path.join(env.install_dir, 'proftpd')
    remote_conf_dir = os.path.join(install_dir, "etc")
    # skip install if already present
    if exists(remote_conf_dir):
        env.logger.debug("ProFTPd seems to already be installed in {0}".format(install_dir))
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            run("wget %s" % url)
            with settings(hide('stdout')):
                run("tar xvzf %s" % os.path.split(url)[1])
            with cd("proftpd-%s" % version):
                run("CFLAGS='-I/usr/include/postgresql' ./configure --prefix=%s " \
                    "--disable-auth-file --disable-ncurses --disable-ident --disable-shadow " \
                    "--enable-openssl --with-modules=%s " \
                    "--with-libraries=/usr/lib/postgresql/%s/lib" % (install_dir, modules, postgres_ver))
                sudo("make")
                sudo("make install")
                sudo("make clean")
    # Get the init.d startup script
    initd_script = 'proftpd.initd'
    initd_url = os.path.join(REPO_ROOT_URL, 'conf_files', initd_script)
    remote_file = "/etc/init.d/proftpd"
    sudo("wget --output-document=%s %s" % (remote_file, initd_url))
    # The repo templates carry a placeholder for the install prefix
    sed(remote_file, 'REPLACE_THIS_WITH_CUSTOM_INSTALL_DIR', install_dir, use_sudo=True)
    sudo("chmod 755 %s" % remote_file)
    # Set the configuration file
    conf_file = 'proftpd.conf'
    conf_url = os.path.join(REPO_ROOT_URL, 'conf_files', conf_file)
    remote_file = os.path.join(remote_conf_dir, conf_file)
    sudo("wget --output-document=%s %s" % (remote_file, conf_url))
    sed(remote_file, 'REPLACE_THIS_WITH_CUSTOM_INSTALL_DIR', install_dir, use_sudo=True)
    # Get the custom welcome msg file
    welcome_msg_file = 'welcome_msg.txt'
    welcome_url = os.path.join(REPO_ROOT_URL, 'conf_files', welcome_msg_file)
    sudo("wget --output-document=%s %s" %
         (os.path.join(remote_conf_dir, welcome_msg_file), welcome_url))
    # Stow
    sudo("cd %s; stow proftpd" % env.install_dir)
    env.logger.debug("----- ProFTPd %s installed to %s -----" % (version, install_dir))
def _configure_nfs(env):
    """Register the directories CloudMan shares over NFS in /etc/exports."""
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxyData/export"
    parent_dir = os.path.dirname(nfs_dir)
    sudo("mkdir -p %s" % parent_dir)
    sudo("chown -R %s %s" % (env.user, parent_dir))
    # The symlink may already exist on a rerun; don't abort if ln fails.
    with settings(warn_only=True):
        run("ln -s %s %s" % (cloudman_dir, nfs_dir))
    exports = [
        '/opt/sge *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyData *(rw,sync,no_root_squash,subtree_check,no_wdelay)',
        '/mnt/galaxyIndices *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyTools *(rw,sync,no_root_squash,no_subtree_check)',
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
        '%s/openmpi *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir,
    ]
    append('/etc/exports', exports, use_sudo=True)
def _get_sge():
    """Download and extract the SGE 6.2u5 distribution into ``env.install_dir``.

    Skips the download only when BOTH component tarballs (binary and common)
    are already present on the remote host.
    """
    sge_dir = 'ge6.2u5'
    url = "%s/ge62u5_lx24-amd64.tar.gz" % CDN_ROOT_URL
    install_dir = env.install_dir
    bin_tarball = os.path.join(install_dir, sge_dir, 'ge-6.2u5-bin-lx24-amd64.tar.gz')
    common_tarball = os.path.join(install_dir, sge_dir, 'ge-6.2u5-common.tar.gz')
    # BUG FIX: the original called exists(path_a or path_b) -- the `or` was
    # evaluated on the path strings, so only the first path was ever checked
    # and the common tarball's presence was never verified. Check each path.
    if not (exists(bin_tarball) and exists(common_tarball)):
        with _make_tmp_dir() as work_dir:
            with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
                run("wget %s" % url)
                # Target must be writable by the remote user before extraction
                sudo("chown %s %s" % (env.user, install_dir))
                run("tar -C %s -xvzf %s" % (install_dir, os.path.split(url)[1]))
                print(green("----- SGE downloaded and extracted to '%s' -----" % install_dir))
    else:
        print(green("SGE already exists at '%s'" % install_dir))
def _configure_nfs(env):
    """
    Edit ``/etc/exports`` to append paths that are shared over NFS by CloudMan.

    In addition to the hard coded paths listed here, additional paths
    can be included by setting ``extra_nfs_exports`` in ``fabricrc.txt`` as
    a comma-separated list of directories.
    """
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxy/export"
    if not exists(nfs_dir):
        # For the case of rerunning this script, ensure the nfs_dir does
        # not exist (exists() method does not recognize it as a file because
        # by default it points to a non-existing dir/file).
        with settings(warn_only=True):
            sudo('rm -rf {0}'.format(nfs_dir))
        sudo("mkdir -p %s" % os.path.dirname(nfs_dir))
        sudo("ln -s %s %s" % (cloudman_dir, nfs_dir))
    sudo("chown -R %s %s" % (env.user, os.path.dirname(nfs_dir)))
    # Setup /etc/exports paths, to be used as NFS mount points
    galaxy_data_mount = env.get("galaxy_data_mount", "/mnt/galaxyData")
    galaxy_indices_mount = env.get("galaxy_indices_mount", "/mnt/galaxyIndices")
    galaxy_tools_mount = env.get("galaxy_tools_mount", "/mnt/galaxyTools")
    exports = [
        '/opt/sge *(rw,sync,no_root_squash,no_subtree_check)',
        '/opt/hadoop *(rw,sync,no_root_squash,no_subtree_check)',
        '%s *(rw,sync,no_root_squash,subtree_check,no_wdelay)' % galaxy_data_mount,
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % galaxy_indices_mount,
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % galaxy_tools_mount,
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
        '%s/openmpi *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir
    ]
    extra_nfs_exports = env.get("extra_nfs_exports", "")
    # BUG FIX: ''.split(',') yields [''], which previously appended a bogus
    # " *(rw,...)" line to /etc/exports when no extra exports were configured.
    if extra_nfs_exports:
        for extra_nfs_export in extra_nfs_exports.split(","):
            exports.append('%s *(rw,sync,no_root_squash,no_subtree_check)' % extra_nfs_export)
    append('/etc/exports', exports, use_sudo=True)
    # Create a symlink for backward compatibility where all of CloudMan's
    # stuff is expected to be in /opt/galaxy
    old_dir = '/opt/galaxy'
    # Because stow is used, the equivalent to CloudMan's expected path
    # is actually the parent of the install_dir so use it for the symlink
    new_dir = os.path.dirname(env.install_dir)
    if not exists(old_dir) and exists(new_dir):
        sudo('ln -s {0} {1}'.format(new_dir, old_dir))
    env.logger.info("Done configuring NFS for CloudMan")
def _mount_ebs():
    """Mount the attached EBS volume at DEST_DATA_DIR.

    Creates the mount point if needed, refuses to mount over a non-empty
    directory, and only creates a fresh XFS file system when the volume was
    NOT restored from a snapshot (SNAP_ID unset).
    """
    with v_settings(host_string=env.hosts[0]):
        if not exists(DEST_DATA_DIR):
            sudo("mkdir -p %s" % DEST_DATA_DIR)
        # Check if DEST_DATA_DIR is empty before attempting to mount;
        # the test command fails (non-zero) when the directory is empty.
        with settings(hide('stderr'), warn_only=True):
            result = sudo('[ "$(ls -A %s)" ]' % DEST_DATA_DIR)
        if result.failed:
            print "Directory '%s' is empty. Good." % DEST_DATA_DIR
            if not SNAP_ID:
                # If not recreating a volume from a snapshot, create file system before mounting
                sudo("mkfs.xfs %s" % env.vol_device)
            sudo("mount -t xfs %s %s" % (env.vol_device, DEST_DATA_DIR))
            sudo("chown %s %s" % (env.user, DEST_DATA_DIR))
        else:
            print "ERROR: data dir '%s' is not empty? Did not mount device '%s'" % (DEST_DATA_DIR, env.vol_device)
def install(self, local_env={}, force=False): """ If not already installed, install given tool and all of its dependencies. """ if self.set_env(local_env): if (not self.is_installed(local_env=local_env) and not self.tool_env['fatal_error']) or force: if not self.tool_env['dependencies_ok']: if not self.resolve_dependencies(): print "----- ERROR resolving dependencies -----" return False # Maybe the dependencies is all that was missing so check if # the tool can be considered as installed now if not self.is_installed(local_env=local_env) or force: print "Trying to install %s %s as user %s" % ( self.tool_env['pkg_name'], self.tool_env['version'], env.user) # TODO: Get complete file name list files = ['datagen.py'] install_cmd = sudo if self.tool_env['use_sudo'] else run if not exists(self.tool_env['install_dir']): install_cmd("mkdir -p %s" % self.tool_env['install_dir']) install_cmd("chown %s %s" % (self.tool_env['user'], self.tool_env['install_dir'])) with nested(cd(self.tool_env['install_dir']), settings(hide('stdout'))): for f in files: install_cmd("wget --no-check-certificate %s" % (self.tool_env['url'] + f)) if self.tool_env.has_key('shogun_env_script') and exists( self.tool_env['shogun_env_script']): install_cmd("echo '. %s' > %s/env.sh" % (self.tool_env['shogun_env_script'], self.tool_env['install_dir'])) install_cmd("chmod +x %s/env.sh" % self.tool_env['install_dir']) install_cmd('chown -R %s %s' % (env.user, self.tool_env['install_dir'])) else: print "ERROR: Required dependency file for not found (for Shogun)." self.tool_env['fatal_error'] = True # Make sure the tool installed correctly if not self.tool_env['fatal_error'] and (self.tool_env['installed'] or self.is_installed()): return common.compose_successful_return(self.tool_env) print "----- Problem installing EasySVM -----" return self.tool_env
def _configure_nfs(env):
    """Set up the /export/data share and append CloudMan's NFS exports."""
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxyData/export"
    base_dir = os.path.dirname(nfs_dir)
    sudo("mkdir -p %s" % base_dir)
    sudo("chown -R ubuntu %s" % base_dir)
    # Symlink creation is best-effort; it may already exist on a rerun.
    with settings(warn_only=True):
        run("ln -s %s %s" % (cloudman_dir, nfs_dir))
    exports = [
        '/opt/sge *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyData *(rw,sync,no_root_squash,subtree_check,no_wdelay)',
        '/mnt/galaxyIndices *(rw,sync,no_root_squash,no_subtree_check)',
        '/mnt/galaxyTools *(rw,sync,no_root_squash,no_subtree_check)',
        '%s *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
        '%s/openmpi *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir,
    ]
    append('/etc/exports', exports, use_sudo=True)
def ssh_key():
    """Install your SSH public key as an authorized key on the server"""
    key = util.get_ssh_key()
    login = os.environ["USER"]
    # Shell script run remotely: appends the local public key to the login
    # user's authorized_keys, creating ~/.ssh with the conventional
    # permissions (700 dir, 600 file) and correct ownership first.
    script_text = """KEYS=~%(login)s/.ssh/authorized_keys
DIR=$(dirname "${KEYS}")
mkdir -p "${DIR}"
touch "${KEYS}"
chown '%(login)s:%(login)s' "${DIR}" "${KEYS}"
chmod 700 "${DIR}"
chmod 600 "${KEYS}"
cat <<EOF>> "${KEYS}"
%(key)s
EOF
""" % locals()
    # Run as the privileged user configured here (value redacted in source)
    with settings(user="******"):
        script(script_text, "install ssh key to " + login)
def install_nginx(env):
    """Build and install nginx 0.7.67 from source with the CloudMan modules.

    Installs under ``env.install_dir`` via stow, fetches nginx.conf and the
    custom error documents from the repo, and symlinks the binary into
    ``env.install_dir``.
    """
    version = "0.7.67"
    url = "http://nginx.org/download/nginx-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, "nginx")
    remote_conf_dir = os.path.join(install_dir, "conf")
    # skip install if already present (a CloudMan-flavored nginx.conf
    # containing "/cloud" marks a prior install)
    if exists(remote_conf_dir) and contains(
            os.path.join(remote_conf_dir, "nginx.conf"), "/cloud"):
        return
    with _make_tmp_dir() as work_dir:
        with contextlib.nested(cd(work_dir), settings(hide('stdout'))):
            modules = _get_nginx_modules(env)
            module_flags = " ".join(["--add-module=../%s" % x for x in modules])
            run("wget %s" % url)
            run("tar xvzf %s" % os.path.split(url)[1])
            with cd("nginx-%s" % version):
                run("./configure --prefix=%s --with-ipv6 %s "
                    "--user=galaxy --group=galaxy "
                    "--with-http_ssl_module --with-http_gzip_static_module" %
                    (install_dir, module_flags))
                # Third-party modules break the build under -Werror
                sed("objs/Makefile", "-Werror", "")
                run("make")
                sudo("make install")
                sudo("cd %s; stow nginx" % env.install_dir)
    # Fetch the CloudMan nginx configuration from the repo
    nginx_conf_file = 'nginx.conf'
    url = os.path.join(REPO_ROOT_URL, nginx_conf_file)
    with cd(remote_conf_dir):
        sudo("wget --output-document=%s/%s %s" % (remote_conf_dir, nginx_conf_file, url))
    # Deploy custom error pages
    nginx_errdoc_file = 'nginx_errdoc.tar.gz'
    url = os.path.join(REPO_ROOT_URL, nginx_errdoc_file)
    remote_errdoc_dir = os.path.join(install_dir, "html")
    with cd(remote_errdoc_dir):
        sudo("wget --output-document=%s/%s %s" %
             (remote_errdoc_dir, nginx_errdoc_file, url))
        sudo('tar xvzf %s' % nginx_errdoc_file)
    # Symlink the binary where CloudMan expects it
    sudo("mkdir -p %s" % env.install_dir)
    if not exists("%s/nginx" % env.install_dir):
        sudo("ln -s %s/sbin/nginx %s/nginx" % (install_dir, env.install_dir))
def _cleanup_ec2(env):
    """
    Clean up any extra files after building. This method must be called on an
    instance after being built and before creating a new machine image. *Note*
    that after this method has run, key-based ssh access to the machine is no
    longer possible.
    """
    env.logger.info("Cleaning up for EC2 AMI creation")
    # Clean up log files and such; relative names resolve against the remote
    # user's home directory.
    fnames = [".bash_history", "/var/log/firstboot.done", ".nx_setup_done",
              "/var/crash/*", "%s/ec2autorun.py.log" % env.install_dir,
              "%s/ec2autorun.err" % env.install_dir,
              "%s/ec2autorun.log" % env.install_dir,
              "%s/bin/ec2autorun.log" % env.install_dir]
    for fname in fnames:
        sudo("rm -f %s" % fname)
    # Runtime/state directories that must not be baked into the image
    rmdirs = ["/mnt/galaxyData", "/mnt/cm", "/tmp/cm"]
    for rmdir in rmdirs:
        sudo("rm -rf %s" % rmdir)
    # Seed the history with frequently used commands
    env.logger.debug("Setting bash history")
    local = os.path.join(env.config_dir, os.pardir, "installed_files",
                         "bash_history")
    remote = os.path.join('/home', 'ubuntu', '.bash_history')
    put(local, remote, mode=0660, use_sudo=True)
    # Make sure the default config dir is owned by ubuntu
    sudo("chown ubuntu:ubuntu ~/.config")
    # Stop Apache from starting automatically at boot (it conflicts with Galaxy's nginx)
    sudo('/usr/sbin/update-rc.d -f apache2 remove')
    # warn_only so an already-stopped service / missing package does not abort
    with settings(warn_only=True):
        # RabbitMQ fails to start if its database is embedded into the image
        # because it saves the current IP address or host name so delete it now.
        # When starting up, RabbitMQ will recreate that directory.
        # Try both the SysV init script and the service wrapper.
        sudo('/etc/init.d/rabbitmq-server stop')
        sudo('service rabbitmq-server stop')
        # Clean up packages that are causing issues or are unnecessary
        pkgs_to_remove = ['tntnet', 'tntnet-runtime', 'libtntnet9', 'vsftpd']
        for ptr in pkgs_to_remove:
            sudo('apt-get -y --force-yes remove --purge {0}'.format(ptr))
        # Make upstart re-read job definitions so rabbitmq is not respawned
        sudo('initctl reload-configuration')
        # Remove RabbitMQ's mnesia database from both known locations
        for db_location in ['/var/lib/rabbitmq/mnesia', '/mnesia']:
            if exists(db_location):
                sudo('rm -rf %s' % db_location)
        # remove existing ssh host key pairs
        # http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/AESDG-chapter-sharingamis.html
        sudo("rm -f /etc/ssh/ssh_host_*")
        sudo("rm -f ~/.ssh/authorized_keys*")
        sudo("rm -f /root/.ssh/authorized_keys*")
def _configure_nfs(env):
    """
    Edit ``/etc/exports`` to append paths that are shared over NFS by CloudMan.

    In addition to the hard coded paths listed here, additional paths
    can be included by setting ``extra_nfs_exports`` in ``fabricrc.txt`` as
    a comma-separated list of directories.
    """
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxy/export"
    if not env.safe_exists(nfs_dir):
        # For the case of rerunning this script, ensure the nfs_dir does
        # not exist (exists() method does not recognize it as a file because
        # by default it points to a non-existing dir/file).
        with settings(warn_only=True):
            env.safe_sudo('rm -rf {0}'.format(nfs_dir))
        env.safe_sudo("mkdir -p %s" % os.path.dirname(nfs_dir))
        env.safe_sudo("ln -s %s %s" % (cloudman_dir, nfs_dir))
    env.safe_sudo("chown -R %s %s" % (env.user, os.path.dirname(nfs_dir)))
    # Setup /etc/exports paths, to be used as NFS mount points
    # galaxy_data_mount = env.get("galaxy_data_mount", "/mnt/galaxyData")
    # galaxy_indices_mount = env.get("galaxy_indices_mount", "/mnt/galaxyIndices")
    # galaxy_tools_mount = env.get("galaxy_tools_mount", "/mnt/galaxyTools")
    exports = ['/opt/sge *(rw,sync,no_root_squash,no_subtree_check)',
               '/opt/hadoop *(rw,sync,no_root_squash,no_subtree_check)',
               # '%s *(rw,sync,no_root_squash,subtree_check,no_wdelay)' % galaxy_data_mount,
               # '%s *(rw,sync,no_root_squash,no_subtree_check)' % galaxy_indices_mount,
               # '%s *(rw,sync,no_root_squash,no_subtree_check)' % galaxy_tools_mount,
               # '%s *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
               # '%s/openmpi *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir
               ]
    extra_nfs_exports = env.get("extra_nfs_exports", "")
    if extra_nfs_exports:
        for extra_nfs_export in extra_nfs_exports.split(","):
            exports.append('%s *(rw,sync,no_root_squash,no_subtree_check)' % extra_nfs_export)
    env.safe_append('/etc/exports', exports, use_sudo=True)
    # Create a symlink for backward compatibility where all of CloudMan's
    # stuff is expected to be in /opt/galaxy
    old_dir = '/opt/galaxy'
    # Because stow is used, the equivalent to CloudMan's expected path
    # is actually the parent of the install_dir so use it for the symlink
    new_dir = os.path.dirname(env.install_dir)
    # CONSISTENCY FIX: use env.safe_exists for both checks (the original
    # mixed safe_exists(old_dir) with the bare exists(new_dir)).
    if not env.safe_exists(old_dir) and env.safe_exists(new_dir):
        env.safe_sudo('ln -s {0} {1}'.format(new_dir, old_dir))
    env.logger.info("Done configuring NFS for CloudMan")
def is_installed(self, local_env={}): """ Check if the current tool and its dependencies are installed. Optionally, missing dependencies may be installed.""" if not self.tool_env['env_set']: self.set_env(local_env) print "Checking if %s %s is installed..." % (self.tool_env['pkg_name'], self.tool_env['version']) if not self.tool_env['dependencies_ok']: if not self.resolve_dependencies(install=False): return False install_cmd = sudo if self.tool_env['use_sudo'] else run with nested(cd(self.tool_env['install_dir']), settings(hide('stdout'), warn_only=True)): result = install_cmd("source env.sh; python kirmes.py --help") if result.return_code == 0: self.tool_env['installed'] = True print "%s %s is installed in %s" % (str(self.tool_env['pkg_name']).upper(), self.tool_env['version'], self.tool_env['install_dir']) return True print "%s %s is not installed." % (self.tool_env['pkg_name'], self.tool_env['version']) return False
def install_nginx(env):
    """Build and install nginx 0.7.67 with the CloudMan modules; deploy its
    configuration, custom error pages, and the /opt/galaxy/sbin symlink.
    """
    version = "0.7.67"
    url = "http://nginx.org/download/nginx-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, "nginx")
    remote_conf_dir = os.path.join(install_dir, "conf")
    # skip install if already present (nginx.conf mentioning "/cloud" marks
    # a prior CloudMan-flavored install)
    if exists(remote_conf_dir) and contains(os.path.join(remote_conf_dir, "nginx.conf"), "/cloud"):
        return
    with _make_tmp_dir() as work_dir:
        with contextlib.nested(cd(work_dir), settings(hide("stdout"))):
            modules = _get_nginx_modules(env)
            module_flags = " ".join(["--add-module=../%s" % x for x in modules])
            run("wget %s" % url)
            run("tar xvzf %s" % os.path.split(url)[1])
            with cd("nginx-%s" % version):
                run(
                    "./configure --prefix=%s --with-ipv6 %s "
                    "--user=galaxy --group=galaxy "
                    "--with-http_ssl_module --with-http_gzip_static_module" %
                    (install_dir, module_flags)
                )
                # Third-party modules break the build under -Werror
                sed("objs/Makefile", "-Werror", "")
                run("make")
                sudo("make install")
                sudo("cd %s; stow nginx" % env.install_dir)
    # Fetch the CloudMan nginx configuration from the repo
    nginx_conf_file = "nginx.conf"
    url = os.path.join(REPO_ROOT_URL, nginx_conf_file)
    with cd(remote_conf_dir):
        sudo("wget --output-document=%s/%s %s" % (remote_conf_dir, nginx_conf_file, url))
    # Deploy custom error pages
    nginx_errdoc_file = "nginx_errdoc.tar.gz"
    url = os.path.join(REPO_ROOT_URL, nginx_errdoc_file)
    remote_errdoc_dir = os.path.join(install_dir, "html")
    with cd(remote_errdoc_dir):
        sudo("wget --output-document=%s/%s %s" % (remote_errdoc_dir, nginx_errdoc_file, url))
        sudo("tar xvzf %s" % nginx_errdoc_file)
    # Symlink the binary where CloudMan expects it
    cloudman_default_dir = "/opt/galaxy/sbin"
    sudo("mkdir -p %s" % cloudman_default_dir)
    if not exists("%s/nginx" % cloudman_default_dir):
        sudo("ln -s %s/sbin/nginx %s/nginx" % (install_dir, cloudman_default_dir))
def _clean_galaxy_dir():
    """Clean up the Galaxy directory before snapshotting.

    Removes logs and per-cluster config files, normalizes the SAMTOOLS .loc
    symlink, and fetches the cloud welcome-screen assets if missing.
    """
    # Clean up galaxy directory before snapshoting
    with settings(warn_only=True):
        print(yellow("Cleaning Galaxy's directory"))
        if exists("%s/paster.log" % GALAXY_HOME):
            sudo("rm %s/paster.log" % GALAXY_HOME)
        sudo("rm %s/database/pbs/*" % GALAXY_HOME)
        # set up the symlink for SAMTOOLS (remove this code once SAMTOOLS is converted to data tables)
        if exists("%s/tool-data/sam_fa_indices.loc" % GALAXY_HOME):
            sudo("rm %s/tool-data/sam_fa_indices.loc" % GALAXY_HOME)
        tmp_loc = False
        if not exists("/mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc"):
            sudo("touch /mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc")
            tmp_loc = True
        sudo("ln -s /mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc "
             "%s/tool-data/sam_fa_indices.loc" % GALAXY_HOME)
        if tmp_loc:
            sudo("rm /mnt/galaxyIndices/galaxy/tool-data/sam_fa_indices.loc")
        # If needed, upload the custom cloud welcome screen files
        if not exists("%s/static/images/cloud.gif" % GALAXY_HOME):
            sudo("wget --output-document=%s/static/images/cloud.gif %s/cloud.gif" %
                 (GALAXY_HOME, CDN_ROOT_URL))
        # BUG FIX: the guard previously checked 'cloud_txt.png' while the
        # download wrote 'cloud_text.png', so this wget re-ran on every
        # invocation. Check the same file name that gets written.
        # NOTE(review): confirm whether Galaxy's welcome.html references
        # cloud_text.png or cloud_txt.png.
        if not exists("%s/static/images/cloud_text.png" % GALAXY_HOME):
            sudo("wget --output-document=%s/static/images/cloud_text.png %s/cloud_text.png" %
                 (GALAXY_HOME, CDN_ROOT_URL))
        if not exists("%s/static/welcome.html" % GALAXY_HOME):
            sudo("wget --output-document=%s/static/welcome.html %s/welcome.html" %
                 (GALAXY_HOME, CDN_ROOT_URL))
        # Clean up configuration files form the snapshot to ensure those get
        # downloaded from cluster's (or default) bucket at cluster instantiation
        if exists("%s/universe_wsgi.ini.cloud" % GALAXY_HOME):
            sudo("rm %s/universe_wsgi.ini.cloud" % GALAXY_HOME)
        if exists("%s/tool_conf.xml.cloud" % GALAXY_HOME):
            sudo("rm %s/tool_conf.xml.cloud" % GALAXY_HOME)
        if exists("%s/tool_data_table_conf.xml.cloud" % GALAXY_HOME):
            sudo("rm %s/tool_data_table_conf.xml.cloud" % GALAXY_HOME)
def install_proftpd(env):
    """Build ProFTPd 1.3.3d from source with PostgreSQL SQL-auth modules,
    install under ``env.install_dir`` via stow, and deploy its init.d script
    and configuration files from the repo.
    """
    version = "1.3.3d"
    postgres_ver = "8.4"
    url = "ftp://mirrors.ibiblio.org/proftpd/distrib/source/proftpd-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, 'proftpd')
    remote_conf_dir = os.path.join(install_dir, "etc")
    # skip install if already present
    if exists(remote_conf_dir):
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            run("wget %s" % url)
            with settings(hide('stdout')):
                run("tar xvzf %s" % os.path.split(url)[1])
            with cd("proftpd-%s" % version):
                # NOTE(review): the library path uses /usr/lib/postgres/...;
                # the newer install_proftpd in this file uses
                # /usr/lib/postgresql/... -- confirm which exists on the
                # target distribution.
                run("CFLAGS='-I/usr/include/postgresql' ./configure --prefix=%s --disable-auth-file --disable-ncurses --disable-ident --disable-shadow --enable-openssl --with-modules=mod_sql:mod_sql_postgres:mod_sql_passwd --with-libraries=/usr/lib/postgres/%s/lib" % (install_dir, postgres_ver))
                sudo("make")
                sudo("make install")
                sudo("make clean")
    # Get init.d startup script
    initd_script = 'proftpd'
    initd_url = os.path.join(REPO_ROOT_URL, 'conf_files', initd_script)
    sudo("wget --output-document=%s %s" %
         (os.path.join('/etc/init.d', initd_script), initd_url))
    sudo("chmod 755 %s" % os.path.join('/etc/init.d', initd_script))
    # Get configuration files
    proftpd_conf_file = 'proftpd.conf'
    welcome_msg_file = 'welcome_msg.txt'
    conf_url = os.path.join(REPO_ROOT_URL, 'conf_files', proftpd_conf_file)
    welcome_url = os.path.join(REPO_ROOT_URL, 'conf_files', welcome_msg_file)
    sudo("wget --output-document=%s %s" %
         (os.path.join(remote_conf_dir, proftpd_conf_file), conf_url))
    sudo("wget --output-document=%s %s" %
         (os.path.join(remote_conf_dir, welcome_msg_file), welcome_url))
    # stow symlinks the versioned install into env.install_dir
    sudo("cd %s; stow proftpd" % env.install_dir)
def is_installed(self, local_env={}, install_dependencies=False): """ Check if the current tool and its dependencies are installed. Optionally, missing dependencies may be installed.""" if not self.tool_env['env_set']: self.set_env(local_env) print "Checking if %s %s is installed..." % (self.tool_env['pkg_name'], self.tool_env['version']) f = tempfile.NamedTemporaryFile() f.write(test_script) f.flush() with common.make_tmp_dir(self.tool_env) as work_dir: put(f.name, os.path.join(work_dir, 'features_string_char_modular.py')) f.close() with cd(work_dir): with settings(warn_only=True): install_cmd = sudo if self.tool_env['use_sudo'] else run result = install_cmd("source %s/env.sh; python features_string_char_modular.py" % self.tool_env['install_dir']) if result.return_code == 0: self.tool_env['installed'] = True print "%s %s is installed in %s" % (str(self.tool_env['pkg_name']).upper(), self.tool_env['version'], self.tool_env['install_dir']) return True print "Shogun %s is not installed." % self.tool_env['version'] return False
def _configure_nfs():
    """Create the /export/data share and idempotently append CloudMan's NFS
    export lines to /etc/exports (each line is only added if not present).
    """
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxyData/export"
    if not exists(nfs_dir):
        sudo("mkdir -p %s" % os.path.dirname(nfs_dir))
        sudo("chown -R ubuntu %s" % os.path.dirname(nfs_dir))
        # Best-effort symlink; may fail harmlessly on a rerun
        with settings(warn_only=True):
            run("ln -s %s %s" % (cloudman_dir, nfs_dir))
    nfs_file = '/etc/exports'
    # contains() guards keep repeated runs from duplicating export lines
    if not contains(nfs_file, '/opt/sge'):
        append(nfs_file, '/opt/sge *(rw,sync,no_root_squash,no_subtree_check)',
               use_sudo=True)
    if not contains(nfs_file, '/mnt/galaxyData'):
        append(nfs_file,
               '/mnt/galaxyData *(rw,sync,no_root_squash,subtree_check,no_wdelay)',
               use_sudo=True)
    if not contains(nfs_file, nfs_dir):
        append(nfs_file, '%s *(rw,sync,no_root_squash,no_subtree_check)' % nfs_dir,
               use_sudo=True)
    if not contains(nfs_file, '%s/openmpi' % env.install_dir):
        append(nfs_file,
               '%s/openmpi *(rw,sync,no_root_squash,no_subtree_check)' % env.install_dir,
               use_sudo=True)
    # Galaxy-specific shares only when building a Galaxy-enabled image
    if env.galaxy_too:
        if not contains(nfs_file, '/mnt/galaxyIndices'):
            append(nfs_file,
                   '/mnt/galaxyIndices *(rw,sync,no_root_squash,no_subtree_check)',
                   use_sudo=True)
        if not contains(nfs_file, '/mnt/galaxyTools'):
            append(nfs_file,
                   '/mnt/galaxyTools *(rw,sync,no_root_squash,no_subtree_check)',
                   use_sudo=True)
    print(green("NFS /etc/exports dir configured"))
def is_installed(self, local_env={}, install_dependencies=False): """ Check if the current tool and its dependencies are installed. Optionally, missing dependencies may be installed.""" if not self.tool_env['env_set']: self.set_env(local_env) print "Checking if %s %s is installed..." % (str( self.tool_env['pkg_name']).upper(), self.tool_env['version']) if not self.tool_env['dependencies_ok']: if not self.resolve_dependencies(install=False): return False install_cmd = sudo if self.tool_env['use_sudo'] else run with nested(cd(self.tool_env['install_dir']), settings(hide('stdout'), warn_only=True)): result = install_cmd("source env.sh; asp --organism=Worm dna.fa") if result.return_code == 0: self.tool_env['installed'] = True print "%s %s is installed in %s" % ( str(self.tool_env['pkg_name']).upper(), self.tool_env['version'], self.tool_env['install_dir']) return True print "%s %s is not installed." % (str( self.tool_env['pkg_name']).upper(), self.tool_env['version']) return False
def _install_proftpd():
    """Build ProFTPd 1.3.3d with PostgreSQL SQL-auth modules and deploy its
    init.d script and configuration by uploading local files (rather than
    fetching them from the repo URL as the other variant does).
    """
    version = "1.3.3d"
    postgres_ver = "8.4"
    url = "ftp://mirrors.ibiblio.org/proftpd/distrib/source/proftpd-%s.tar.gz" % version
    install_dir = os.path.join(env.install_dir, 'proftpd')
    remote_conf_dir = os.path.join(install_dir, "etc")
    # skip install if already present
    if exists(remote_conf_dir):
        return
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            run("wget %s" % url)
            with settings(hide('stdout')):
                run("tar xzf %s" % os.path.split(url)[1])
            with cd("proftpd-%s" % version):
                # NOTE(review): /usr/lib/postgres/... vs /usr/lib/postgresql/...
                # -- confirm the correct library path on the target distro.
                run("CFLAGS='-I/usr/include/postgresql' ./configure --prefix=%s --disable-auth-file --disable-ncurses --disable-ident --disable-shadow --enable-openssl --with-modules=mod_sql:mod_sql_postgres:mod_sql_passwd --with-libraries=/usr/lib/postgres/%s/lib" % (install_dir, postgres_ver))
                run("make")
                sudo("make install")
                sudo("make clean")
    # Get init.d startup script (uploaded as a specific user; value redacted)
    proftp_initd_script = 'proftpd'
    local_proftp_initd_path = os.path.join('conf_files', proftp_initd_script)
    remote_proftpd_initd_path = os.path.join('/etc/init.d', proftp_initd_script)
    _put_as_user(local_proftp_initd_path, remote_proftpd_initd_path, user='******')
    sudo('chmod 755 %s' % remote_proftpd_initd_path)
    # Get configuration files
    proftpd_conf_file = 'proftpd.conf'
    local_conf_path = os.path.join('conf_files', proftpd_conf_file)
    remote_conf_path = os.path.join(remote_conf_dir, proftpd_conf_file)
    welcome_msg_file = 'welcome_msg.txt'
    local_welcome_msg_path = os.path.join('conf_files', welcome_msg_file)
    remote_welcome_msg_path = os.path.join(remote_conf_dir, welcome_msg_file)
    _put_as_user(local_conf_path, remote_conf_path, user='******')
    _put_as_user(local_welcome_msg_path, remote_welcome_msg_path, user='******')
    # stow symlinks the versioned install into env.install_dir
    sudo("cd %s; stow proftpd" % env.install_dir)
    print(green("----- ProFTPd %s installed to %s -----" % (version, install_dir)))
def install(self, local_env={}, force=False): """ If not already installed, install given tool and all of its dependencies. """ if self.set_env(local_env): if (not self.is_installed(local_env=local_env) and not self.tool_env['fatal_error']) or force: if not self.tool_env['dependencies_ok']: if not self.resolve_dependencies(): print "----- ERROR resolving dependencies -----" return False # Maybe the dependencies is all that was missing so check if # the tool can be considered as installed now if not self.is_installed(local_env=local_env) or force: print "Trying to install %s %s as user %s" % ( str(self.tool_env['pkg_name']).upper(), self.tool_env['version'], env.user) install_cmd = sudo if self.tool_env['use_sudo'] else run with nested(cd(self.tool_env['install_dir']), settings(hide('stdout'))): install_cmd("wget %s" % self.tool_env['url']) install_cmd("wget %s" % self.tool_env['url2']) install_cmd("tar xfvj %s; rm %s" % (os.path.split(self.tool_env['url'])[1], os.path.split(self.tool_env['url'])[1])) install_cmd("tar xfvz %s; rm %s" % (os.path.split(self.tool_env['url2'])[1], os.path.split(self.tool_env['url2'])[1])) install_cmd("mv asp-%s/* .; rm -rf asp-%s" % (self.tool_env['version'], self.tool_env['version'])) # find a way to provide xmls if not exists('%s/tools/signals' % self.tool_env['galaxy_dir']): install_cmd('mkdir %s/tools/signals' % self.tool_env['galaxy_dir']) with settings(warn_only=True): install_cmd( "sed -i '1i if [ -n \"${PACKAGE_BASE+x}\" ]; then cd %s; fi' %s.sh" % (self.tool_env['install_dir'], self.tool_env['pkg_name'])) install_cmd( 'mv %s.xml %s.sh %s/tools/signals/' % (self.tool_env['pkg_name'], self.tool_env['pkg_name'], self.tool_env['galaxy_dir'])) install_cmd("chmod +x %s/tools/signals/%s.sh" % (self.tool_env['galaxy_dir'], self.tool_env['pkg_name'])) install_cmd( 'chown -R %s %s/tools/signals' % (env.user, self.tool_env['galaxy_dir'])) if self.tool_env.has_key('shogun_env_script') and exists( self.tool_env['shogun_env_script']): 
install_cmd("echo '. %s' > %s/env.sh" % (self.tool_env['shogun_env_script'], self.tool_env['install_dir'])) install_cmd( "echo 'export PATH=%s:$PATH' >> %s/env.sh" % (self.tool_env['install_dir'], self.tool_env['install_dir'])) install_cmd( "echo 'export PYTHONPATH=%s:$PYTHONPATH' >> %s/env.sh" % (self.tool_env['install_dir'], self.tool_env['install_dir'])) install_cmd("chmod +x %s/env.sh" % self.tool_env['install_dir']) install_dir_root = os.path.join( self.tool_env['install_dir_root'], self.tool_env['pkg_name']) install_cmd( 'if [ ! -d %s/default ]; then ln -s %s %s/default; fi' % (install_dir_root, self.tool_env['install_dir'], install_dir_root)) install_cmd('chown -R %s %s' % (env.user, install_dir_root)) else: print "ERROR: Required dependency file for not found (for Shogun)." self.tool_env['fatal_error'] = True # Make sure the tool installed correctly if not self.tool_env['fatal_error'] and (self.tool_env['installed'] or self.is_installed()): return common.compose_successful_return(self.tool_env) print "----- Problem installing ASP -----" return self.tool_env