Example #1
 def add(self):
     """
     Add this file system by creating a dedicated path (i.e., self.fs.mount_point)
     and exporting it over NFS. Set the ``ubuntu`` user as the owner of the
     mount point.
     """
     log.debug("Adding a transient FS at {0}".format(self.fs.mount_point))
     # transient_nfs gets created first - make sure it's on a dedicated device
     if self.fs.name == 'transient_nfs' and self.app.cloud_type == 'ec2':
         self._ensure_ephemeral_disk_mounted()
     misc.make_dir(self.fs.mount_point, owner='ubuntu')
     # Set the device ID
     cmd = "df %s | grep -v Filesystem | awk '{print $1}'" % self.fs.mount_point
     self.device = misc.getoutput(cmd)
     # If based on an archive, extract archive contents to the mount point
     if self.from_archive:
         self.fs.persistent = True
         self.fs.state = service_states.CONFIGURING
         # Extract the FS archive in a separate thread
         log.debug("Extracting transient FS {0} from an archive in a "
                   "dedicated thread.".format(self.get_full_name()))
         ExtractArchive(self.from_archive['url'], self.fs.mount_point,
                        self.from_archive['md5_sum'],
                        callback=self.fs.nfs_share_and_set_state).start()
     else:
         self.fs.nfs_share_and_set_state()
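
The `df | grep -v Filesystem | awk '{print $1}'` pipeline above resolves which device backs the mount point. Below is a minimal standalone sketch of the same lookup using only the standard library instead of the project's `misc.getoutput` helper; the mount path used at the bottom is hypothetical.

    # Standalone sketch of the device lookup done by the shell pipeline above
    # (df | grep -v Filesystem | awk '{print $1}'), using subprocess instead
    # of the project's misc.getoutput helper.
    import subprocess

    def device_for_mount_point(mount_point):
        """Return the device (first df column) backing ``mount_point``."""
        output = subprocess.check_output(['df', mount_point]).decode('utf-8')
        # Drop the "Filesystem ..." header line and take the first column.
        data_lines = [l for l in output.splitlines()
                      if not l.startswith('Filesystem')]
        return data_lines[0].split()[0] if data_lines else ''

    # Hypothetical mount point, for illustration only.
    print(device_for_mount_point('/mnt/transient_nfs'))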
Example #2
 def _configure(self):
     """
     Configure the Pulsar application.
     """
     self.state = service_states.CONFIGURING
     misc.make_dir(self.pulsar_home)
     # Write out app.yml
     token = ''.join(random.SystemRandom().choice(string.ascii_uppercase +
                     string.lowercase + string.digits) for _ in range(25))
     app_template = Template(app_yml)
     app_yml_file = os.path.join(self.pulsar_home, 'app.yml')
     misc.write_template_file(app_template, {'token': token}, app_yml_file)
     # Write out server.ini
     srvr_template = Template(server_ini)
     server_ini_file = os.path.join(self.pulsar_home, 'server.ini')
     misc.write_template_file(srvr_template, {'pulsar_port': self.pulsar_port},
                              server_ini_file)
     # Write out local_env.sh
     lcl_template = Template(local_env_sh)
     lcl_file = os.path.join(self.pulsar_home, 'local_env.sh')
     misc.write_template_file(lcl_template, {'galaxy_home': '/mnt/galaxy/galaxy-app'},
                              lcl_file)
     # Set the owner to 'galaxy' system user
     attempt_chown_galaxy(self.pulsar_home, recursive=True)
     if self.supervisor:
         # Create a supervisor conf file
         supervisor_conf_file = os.path.join(self.supervisor.conf_dir,
                                             '{0}.conf'.format(self.supervisor_prog_name))
         template = Template(supervisor_conf)
         misc.write_template_file(template, None, supervisor_conf_file)
     else:
         log.warning("No supervisor service?")
Example #3
 def _setup(self):
     log.debug("Running GalaxyReportsService _setup")
     reports_option_manager = DirectoryGalaxyOptionManager(self.app,
                                                           conf_dir=self.conf_dir,
                                                           conf_file_name='reports_wsgi.ini')
     reports_option_manager.setup()
     file_path = os.path.join(self.app.path_resolver.galaxy_data, "files")
     misc.make_dir(file_path, owner='galaxy')
     new_file_path = os.path.join(self.app.path_resolver.galaxy_data, "tmp")
     main_props = {
         'database_connection': "postgres://galaxy@localhost:{0}/galaxy"
                                .format(self.app.path_resolver.psql_db_port),
         'filter-with': 'proxy-prefix',
         'file_path': file_path,
         'new_file_path': new_file_path
     }
     proxy_props = {
         'use': 'egg:PasteDeploy#prefix',
         'prefix': '/reports',
     }
     reports_option_manager.set_properties(main_props, section='app:main',
                                           description='app_main_props')
     reports_option_manager.set_properties(proxy_props,
                                           section='filter:proxy-prefix',
                                           description='proxy_prefix_props')
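
The option manager ultimately produces an INI file with an `app:main` section and a `filter:proxy-prefix` section. As a rough illustration of that end result, here is a sketch using the standard-library ConfigParser; it assumes nothing about `DirectoryGalaxyOptionManager`, and the values are placeholders.

    # Rough sketch of the INI content the option manager writes: an [app:main]
    # section plus a [filter:proxy-prefix] section. Values are placeholders.
    try:
        import configparser                      # Python 3
    except ImportError:
        import ConfigParser as configparser      # Python 2

    def write_reports_ini(path, db_port, file_path, new_file_path):
        cp = configparser.RawConfigParser()
        cp.add_section('app:main')
        cp.set('app:main', 'database_connection',
               'postgres://galaxy@localhost:{0}/galaxy'.format(db_port))
        cp.set('app:main', 'filter-with', 'proxy-prefix')
        cp.set('app:main', 'file_path', file_path)
        cp.set('app:main', 'new_file_path', new_file_path)
        cp.add_section('filter:proxy-prefix')
        cp.set('filter:proxy-prefix', 'use', 'egg:PasteDeploy#prefix')
        cp.set('filter:proxy-prefix', 'prefix', '/reports')
        with open(path, 'w') as f:
            cp.write(f)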
Example #4
 def add(self):
     """
     Add this file system by creating a dedicated path (i.e., self.fs.mount_point)
     and exporting it over NFS. Set the ``ubuntu`` user as the owner of the
     mount point.
     """
     log.debug("Adding a transient FS at {0}".format(self.fs.mount_point))
     misc.make_dir(self.fs.mount_point, owner='ubuntu')
     # Make the default instance transient storage group writable
     if self.fs.name == 'transient_nfs':
         misc.chmod(self.fs.mount_point, 0775)
     # Set the device ID
     cmd = "df %s | grep -v Filesystem | awk '{print $1}'" % self.fs.mount_point
     self.device = misc.getoutput(cmd)
     # If based on an archive, extract archive contents to the mount point
     if self.from_archive:
         # Do not overwrite an existing dir structure w/ the archive content.
         # This happens when a cluster is rebooted.
         if self.fs.name == 'galaxy' and os.path.exists(self.app.path_resolver.galaxy_home):
             log.debug("Galaxy home dir ({0}) already exists; not extracting "
                       "the archive ({1}) so not to overwrite it."
                       .format(self.app.path_resolver.galaxy_home, self.from_archive['url']))
             self.fs.nfs_share_and_set_state()
         else:
             self.fs.persistent = True
             self.fs.state = service_states.CONFIGURING
             # Extract the FS archive in a separate thread
             log.debug("Extracting transient FS {0} from an archive in a "
                       "dedicated thread.".format(self.get_full_name()))
             ExtractArchive(self.from_archive['url'], self.fs.mount_point,
                            self.from_archive['md5_sum'],
                            callback=self.fs.nfs_share_and_set_state).start()
     else:
         self.fs.nfs_share_and_set_state()
Example #5
 def _setup(self):
     log.debug("Running GalaxyReportsService _setup")
     reports_option_manager = DirectoryGalaxyOptionManager(
         self.app,
         conf_dir=self.conf_dir,
         conf_file_name='reports_wsgi.ini')
     reports_option_manager.setup()
     file_path = os.path.join(self.app.path_resolver.galaxy_data, "files")
     misc.make_dir(file_path, owner='galaxy')
     new_file_path = os.path.join(self.app.path_resolver.galaxy_data, "tmp")
     main_props = {
         'database_connection': "postgres://galaxy@localhost:{0}/galaxy".format(
             self.app.path_resolver.psql_db_port),
         'filter-with': 'proxy-prefix',
         'file_path': file_path,
         'new_file_path': new_file_path
     }
     proxy_props = {
         'use': 'egg:PasteDeploy#prefix',
         'prefix': '/reports',
     }
     reports_option_manager.set_properties(main_props,
                                           section='app:main',
                                           description='app_main_props')
     reports_option_manager.set_properties(proxy_props,
                                           section='filter:proxy-prefix',
                                           description='proxy_prefix_props')
Example #6
    def _setup_slurm_conf(self):
        """
        Set up the ``slurm.conf`` configuration file.
        """
        def _worker_nodes_conf():
            """
            Compose the conf lines pertaining to the worker nodes. Return two
            strings: one-per-line node specifications (e.g.,
            ``NodeName=w1 NodeAddr=<private_IP> Weight=5 State=UNKNOWN``) and
            a comma-separated list of node names (e.g., w1, w2).
            Note that only nodes in status ``Ready`` or ``Startup`` will be
            included.
            """
            wnc = ''
            wnn = ''
            for i, w in enumerate(self.app.manager.worker_instances):
                if w.worker_status in ['Ready', 'Startup']:
                    wnc += ('NodeName={0} NodeAddr={1} CPUs={2} RealMemory={3} Weight=5 State=UNKNOWN\n'
                            .format(w.alias, w.private_ip, w.num_cpus,
                                    max(1, w.total_memory / 1024)))
                    wnn += ',{0}'.format(w.alias)
            log.debug("Worker node names to include in slurm.conf: {0}".format(wnn[1:]))
            return wnc, wnn

        def _build_slurm_conf():
            log.debug("Setting slurm.conf parameters")
            # Make sure the slurm root dir exists and is owned by slurm user
            misc.make_dir(self.app.path_resolver.slurm_root_tmp)
            os.chown(self.app.path_resolver.slurm_root_tmp,
                     pwd.getpwnam("slurm")[2], grp.getgrnam("slurm")[2])
            worker_nodes, worker_names = _worker_nodes_conf()
            slurm_conf_template = conf_manager.load_conf_template(conf_manager.SLURM_CONF_TEMPLATE)
            slurm_conf_params = {
                "master_hostname": misc.get_hostname(),
                "num_cpus": max(self.app.manager.num_cpus - 1, 1),  # Reserve 1 CPU
                "total_memory": max(1, self.app.manager.total_memory / 1024),
                "slurm_root_tmp": self.app.path_resolver.slurm_root_tmp,
                "worker_nodes": worker_nodes,
                "worker_names": worker_names
            }
            return slurm_conf_template.substitute(slurm_conf_params)

        if not os.path.exists(self.app.path_resolver.slurm_root_nfs):
            misc.make_dir(self.app.path_resolver.slurm_root_nfs)
        nfs_slurm_conf = self.app.path_resolver.slurm_conf_nfs
        local_slurm_conf = self.app.path_resolver.slurm_conf_local
        # Occasionally, the NFS file is unavailable, so try a few times
        for i in range(5):
            with flock(self.slurm_lock_file):
                log.debug("Setting up {0} (attempt {1}/5)".format(nfs_slurm_conf, i))
                try:
                    with open(nfs_slurm_conf, 'w') as f:
                        print >> f, _build_slurm_conf()
                    log.debug("Created slurm.conf as {0}".format(nfs_slurm_conf))
                    break
                except IOError, e:
                    log.error("Trouble creating {0}: {1}".format(nfs_slurm_conf, e))
                    time.sleep(2)
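
The write at the end retries under a lock because the target lives on NFS and can be briefly unavailable. Here is a standalone sketch of that retry-under-lock pattern, using `fcntl.flock` directly rather than the project's `flock()` context manager; the paths and attempt count are illustrative.

    # Standalone sketch of the retry-under-lock write used for the shared
    # slurm.conf. Uses fcntl.flock instead of the project's flock() helper.
    import fcntl
    import time

    def write_shared_file(path, lock_path, build_content, attempts=5, delay=2):
        for attempt in range(attempts):
            with open(lock_path, 'w') as lock_fh:
                fcntl.flock(lock_fh, fcntl.LOCK_EX)   # released on close
                try:
                    with open(path, 'w') as f:
                        f.write(build_content())
                    return True
                except IOError:
                    # NFS can be briefly unavailable; back off and retry.
                    time.sleep(delay)
        return False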
Example #7
 def _build_slurm_conf():
     log.debug("Setting slurm.conf parameters")
     # Make sure the slurm root dir exists and is owned by slurm user
     misc.make_dir(self.app.path_resolver.slurm_root_tmp)
     os.chown(self.app.path_resolver.slurm_root_tmp,
              pwd.getpwnam("slurm")[2], grp.getgrnam("slurm")[2])
     worker_nodes, worker_names = _worker_nodes_conf()
     slurm_conf_template = conf_manager.load_conf_template(conf_manager.SLURM_CONF_TEMPLATE)
     slurm_conf_params = {
         "master_hostname": misc.get_hostname(),
         "num_cpus": max(self.app.manager.num_cpus - 1, 1),  # Reserve 1 CPU
         "total_memory": max(1, self.app.manager.total_memory / 1024),
         "slurm_root_tmp": self.app.path_resolver.slurm_root_tmp,
         "worker_nodes": worker_nodes,
         "worker_names": worker_names
     }
     return slurm_conf_template.substitute(slurm_conf_params)
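
`_build_slurm_conf` boils down to `string.Template.substitute` over a slurm.conf template. The sketch below shows that substitution with a trimmed, illustrative template body; it is not the project's actual SLURM_CONF_TEMPLATE, and the parameter values are made up.

    # Sketch of the substitution step with string.Template. The template body
    # is a trimmed, illustrative slurm.conf fragment, not the project's actual
    # SLURM_CONF_TEMPLATE; the parameter values are made up.
    from string import Template

    slurm_conf_template = Template(
        "ControlMachine=$master_hostname\n"
        "SlurmdSpoolDir=$slurm_root_tmp\n"
        "NodeName=master CPUs=$num_cpus RealMemory=$total_memory State=UNKNOWN\n"
        "$worker_nodes"
        "PartitionName=main Nodes=master$worker_names Default=YES State=UP\n")

    print(slurm_conf_template.substitute({
        "master_hostname": "ip-10-0-0-1",
        "num_cpus": 3,
        "total_memory": 7168,
        "slurm_root_tmp": "/tmp/slurm",
        "worker_nodes": "NodeName=w1 NodeAddr=10.0.0.2 CPUs=4 "
                        "RealMemory=7168 Weight=5 State=UNKNOWN\n",
        "worker_names": ",w1",
    }))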
Example #8
 def _configure(self):
     """
     Setup Supervisor for running via CloudMan by creating
     ``supervisord.conf`` file.
     """
     log.debug("Configuring supervisord")
     # Create supervisord config file
     sv_vars = {
         'supervisord_pid': self.pid_file,
         'sv_port': self.sv_port,
         'conf_dir': self.conf_dir,
         'supervisord_log': self.log_file
     }
     template = conf_manager.load_conf_template(conf_manager.SUPERVISOR_TEMPLATE)
     misc.write_template_file(template, sv_vars, self.main_conf_file)
     # Make sure the config dir exists for programs managed by supervisor
     misc.make_dir(self.conf_dir)
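
For context, the supervisord template rendered here needs at least a `[supervisord]` block pointing at the pid/log files plus an include of the per-program conf dir. The stand-in below is illustrative only; the real SUPERVISOR_TEMPLATE ships with the project and may differ.

    # Illustrative stand-in for the supervisord.conf template rendered above;
    # the real SUPERVISOR_TEMPLATE ships with the project and may differ.
    from string import Template

    supervisor_template = Template(
        "[supervisord]\n"
        "pidfile = $supervisord_pid\n"
        "logfile = $supervisord_log\n"
        "\n"
        "[inet_http_server]\n"
        "port = 127.0.0.1:$sv_port\n"
        "\n"
        "[include]\n"
        "files = $conf_dir/*.conf\n")

    sv_vars = {
        'supervisord_pid': '/var/run/supervisord.pid',   # placeholder paths
        'sv_port': 9002,
        'conf_dir': '/etc/supervisor/conf.d',
        'supervisord_log': '/var/log/supervisord.log',
    }
    print(supervisor_template.substitute(sv_vars))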
Example #9
 def _configure(self):
     """
     Setup Supervisor for running via CloudMan by creating
     ``supervisord.conf`` file.
     """
     log.debug("Configuring supervisord")
     # Create supervisord config file
     sv_vars = {
         'supervisord_pid': self.pid_file,
         'sv_port': self.sv_port,
         'conf_dir': self.conf_dir,
         'supervisord_log': self.log_file
     }
     template = conf_manager.load_conf_template(
         conf_manager.SUPERVISOR_TEMPLATE)
     misc.write_template_file(template, sv_vars, self.main_conf_file)
     # Make sure the config dir exists for programs managed by supervisor
     misc.make_dir(self.conf_dir)
Example #10
 def _setup_munge(self):
     """
     Set up Munge (used by Slurm as a user authentication mechanism)
     """
     log.debug("Setting up Munge (for Slurm)...")
     if not os.path.exists('/etc/munge'):
         # Munge not installed so grab it
         misc.run("apt-get update; apt-get install munge libmunge-dev -y")
     misc.run("/usr/sbin/create-munge-key")
     misc.append_to_file('/etc/default/munge', 'OPTIONS="--force"')
     misc.run("service munge start")
     log.debug("Done setting up Munge")
     # Copy the munge key to cluster NFS
     if not os.path.exists(self.app.path_resolver.slurm_root_nfs):
         misc.make_dir(self.app.path_resolver.slurm_root_nfs)
     nfs_munge_key = os.path.join(self.app.path_resolver.slurm_root_nfs, 'munge.key')
     shutil.copyfile('/etc/munge/munge.key', nfs_munge_key)
     os.chmod(nfs_munge_key, 0400)
     log.debug("Copied /etc/munge/munge.key to {0}".format(nfs_munge_key))
Example #11
 def _setup(self):
     log.debug("Running GalaxyReportsService _setup")
     reports_option_manager = DirectoryGalaxyOptionManager(
         self.app, conf_dir=self.conf_dir, conf_file_name='reports.ini')
     reports_option_manager.setup()
     file_path = os.path.join(self.app.path_resolver.galaxy_data, "files")
     misc.make_dir(file_path, owner='galaxy')
     new_file_path = os.path.join(self.app.path_resolver.galaxy_data, "tmp")
     main_props = {
         'database_connection': "postgres://galaxy@localhost:{0}/galaxy".format(
             self.app.path_resolver.psql_db_port),
         'filter-with': 'proxy-prefix',
         'file_path': file_path,
         'new_file_path': new_file_path,
         'paste.app_factory': 'galaxy.webapps.reports.buildapp:app_factory',
         'use_new_layout': 'true'
     }
     proxy_props = {
         'use': 'egg:PasteDeploy#prefix',
         'prefix': '/reports',
     }
     server_props = {
         'use': "egg:Paste#http",
         'port': 9001,
         'host': '127.0.0.1',
         'use_threadpool': 'true',
         'threadpool_workers': 10
     }
     reports_option_manager.set_properties(server_props,
                                           section='server:main',
                                           description='server_main_props')
     reports_option_manager.set_properties(main_props,
                                           section='app:main',
                                           description='app_main_props')
     reports_option_manager.set_properties(proxy_props,
                                           section='filter:proxy-prefix',
                                           description='proxy_prefix_props')
Example #12
    def _setup(self):
        log.debug("Running GalaxyReportsService _setup")
        # WORKAROUND: The run_reports.sh command refers to a parameter
        # named --safe-pidfile which is not supported by the uwsgi binary.
        # Replace it with --pidfile instead.
        patch_start_command = ("sudo sed -i \"s/--safe-pidfile/--pidfile/g"
                               "\" %s/scripts/common_startup_functions.sh"
                               % self.galaxy_home)
        misc.run(patch_start_command)
        # Create default output dir for files
        file_path = os.path.join(self.app.path_resolver.galaxy_home, "database/files")
        misc.make_dir(file_path, owner='galaxy')
        tmp_file_path = os.path.join(self.app.path_resolver.galaxy_home, "database/tmp")
        misc.make_dir(tmp_file_path, owner='galaxy')

        # Create the new reports config
        params = {
            'galaxy_db_port': self.app.path_resolver.psql_db_port
        }
        template = conf_manager.load_conf_template(conf_manager.GALAXY_REPORTS_TEMPLATE)
        misc.write_template_file(template, params, self.conf_file)
        attempt_chown_galaxy(self.conf_file)
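
The workaround shells out to `sed` (with sudo) to swap `--safe-pidfile` for `--pidfile`. If the file were writable by the current user, the same substitution could be done in pure Python, as in this sketch; the path layout matches the command above.

    # Pure-Python equivalent of the sed substitution above, assuming the
    # current user can write the script (the example shells out with sudo).
    import os

    def patch_startup_functions(galaxy_home):
        script = os.path.join(galaxy_home, 'scripts',
                              'common_startup_functions.sh')
        with open(script) as f:
            contents = f.read()
        with open(script, 'w') as f:
            f.write(contents.replace('--safe-pidfile', '--pidfile'))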
Example #13
 def _configure(self):
     """
     Configure the Pulsar application.
     """
     self.state = service_states.CONFIGURING
     misc.make_dir(self.pulsar_home)
     # Write out app.yml
     token = ''.join(
         random.SystemRandom().choice(string.ascii_uppercase +
                                      string.lowercase + string.digits)
         for _ in range(25))
     app_template = Template(app_yml)
     app_yml_file = os.path.join(self.pulsar_home, 'app.yml')
     misc.write_template_file(app_template, {'token': token}, app_yml_file)
     # Write out server.ini
     srvr_template = Template(server_ini)
     server_ini_file = os.path.join(self.pulsar_home, 'server.ini')
     misc.write_template_file(srvr_template,
                              {'pulsar_port': self.pulsar_port},
                              server_ini_file)
     # Write out local_env.sh
     lcl_template = Template(local_env_sh)
     lcl_file = os.path.join(self.pulsar_home, 'local_env.sh')
     misc.write_template_file(lcl_template,
                              {'galaxy_home': '/mnt/galaxy/galaxy-app'},
                              lcl_file)
     # Set the owner to 'galaxy' system user
     attempt_chown_galaxy(self.pulsar_home, recursive=True)
     if self.supervisor:
         # Create a supervisor conf file
         supervisor_conf_file = os.path.join(
             self.supervisor.conf_dir,
             '{0}.conf'.format(self.supervisor_prog_name))
         template = Template(supervisor_conf)
         misc.write_template_file(template, None, supervisor_conf_file)
     else:
         log.warning("No supervisor service?")
Example #14
 def _setup(self):
     log.debug("Running GalaxyReportsService _setup")
     reports_option_manager = DirectoryGalaxyOptionManager(self.app,
                                                           conf_dir=self.conf_dir,
                                                           conf_file_name='reports.ini')
     reports_option_manager.setup()
     file_path = os.path.join(self.app.path_resolver.galaxy_data, "files")
     misc.make_dir(file_path, owner='galaxy')
     new_file_path = os.path.join(self.app.path_resolver.galaxy_data, "tmp")
     main_props = {
         'database_connection': "postgres://galaxy@localhost:{0}/galaxy"
                                .format(self.app.path_resolver.psql_db_port),
         'filter-with': 'proxy-prefix',
         'file_path': file_path,
         'new_file_path': new_file_path,
         'paste.app_factory': 'galaxy.webapps.reports.buildapp:app_factory',
         'use_new_layout': 'true'
     }
     proxy_props = {
         'use': 'egg:PasteDeploy#prefix',
         'prefix': '/reports',
     }
     server_props = {
         'use': "egg:Paste#http",
         'port': 9001,
         'host': '127.0.0.1',
         'use_threadpool': 'true',
         'threadpool_workers': 10
     }
     reports_option_manager.set_properties(server_props, section='server:main',
                                           description='server_main_props')
     reports_option_manager.set_properties(main_props, section='app:main',
                                           description='app_main_props')
     reports_option_manager.set_properties(proxy_props,
                                           section='filter:proxy-prefix',
                                           description='proxy_prefix_props')
Example #15
    def manage_galaxy(self, to_be_started=True):
        """
        Use this method to start and stop the Galaxy application.

        :type to_be_started: bool
        :param to_be_started: If set, this method will attempt to start the
                              Galaxy application process. If not set, the
                              method will attempt to shut down the application
                              process.
        """
        log.debug("Using Galaxy from '{0}'".format(self.galaxy_home))
        os.putenv("GALAXY_HOME", self.galaxy_home)
        os.putenv("TEMP", self.app.path_resolver.galaxy_temp)
        os.putenv("TMPDIR", self.app.path_resolver.galaxy_temp)
        self.env_vars["GALAXY_HOME"] = self.galaxy_home
        self.env_vars["TEMP"] = self.app.path_resolver.galaxy_temp
        self.env_vars["TMPDIR"] = self.app.path_resolver.galaxy_temp
        conf_dir = self.option_manager.setup()
        if conf_dir:
            self.env_vars["GALAXY_UNIVERSE_CONFIG_DIR"] = conf_dir

        if self.multiple_processes():
            self.env_vars["GALAXY_RUN_ALL"] = "TRUE"
            # HACK: Galaxy has a known problem when starting from a fresh
            # configuration in multiple process mode. Each process attempts to
            # create the same directories and one or more processes can fail to
            # start because it "failed" to create said directories (because
            # another process created them first). This hack staggers
            # the process starts in an attempt to circumvent this problem.
            patch_run_sh_command = (
                "sudo sed -i -e \"s/server.log \\$\\@$/\\0; "
                "sleep 4/\" %s/run.sh" % self.galaxy_home)
            misc.run(patch_run_sh_command)
            self.extra_daemon_args = ""
        else:
            # Instead of sticking with default paster.pid and paster.log,
            # explicitly set pid and log file to ``main.pid`` and ``main.log``
            # to bring the single-process case in line with the defaults for the
            # multiple-process case (i.e. when GALAXY_RUN_ALL is set and
            # multiple servers are defined).
            self.extra_daemon_args = "--pid-file=main.pid --log-file=main.log"
        if to_be_started and self.remaining_start_attempts > 0:
            self.status()
            if not self.configured:
                log.debug("Setting up Galaxy application")
                # Set job manager configs if necessary
                for job_manager_svc in self.app.manager.service_registry.active(
                        service_role=ServiceRole.JOB_MANAGER):
                    if ServiceRole.SGE in job_manager_svc.svc_roles:
                        log.debug("Running on SGE; setting env_vars")
                        self.env_vars["SGE_ROOT"] = \
                            self.app.path_resolver.sge_root
                        self.env_vars["DRMAA_LIBRARY_PATH"] = \
                            self.app.path_resolver.drmaa_library_path
                # Make sure Galaxy home dir exists
                if not os.path.exists(self.galaxy_home):
                    log.error("Galaxy application directory '%s' does not "
                              "exist! Aborting." % self.galaxy_home)
                    log.debug("ls /mnt/: %s" % os.listdir('/mnt/'))
                    self.state = service_states.ERROR
                    self.last_state_change_time = datetime.utcnow()
                    return False
                # Ensure the necessary directories exist
                for dir_name in [paths.P_GALAXY_INDICES,
                                 ('%s/tmp/job_working_directory' %
                                  self.app.path_resolver.galaxy_data)]:
                    misc.make_dir(dir_name, 'galaxy')
                self.configured = True
            if not self._is_galaxy_running():
                log.debug("Starting Galaxy...")
                self.update_galaxy_config()
                start_command = self.galaxy_run_command("%s --daemon" %
                                                        self.extra_daemon_args)
                if misc.run(start_command):
                    self.remaining_start_attempts -= 1
                elif self.remaining_start_attempts > 0:
                    log.debug(
                        "It seems Galaxy failed to start; will atempt to "
                        "auto-restart (up to {0} more time(s)).".format(
                            self.remaining_start_attempts))
                    self.state = service_states.UNSTARTED
                    self.last_state_change_time = datetime.utcnow()
                else:
                    log.debug(
                        "It seems Galaxy failed to start; setting service "
                        "state to {0}.".format(service_states.ERROR))
                    self.state = service_states.ERROR
                    self.last_state_change_time = datetime.utcnow()
            else:
                log.debug("Galaxy already running.")
        else:
            log.info("Shutting down Galaxy...")
            self.state = service_states.SHUTTING_DOWN
            stop_command = self.galaxy_run_command("%s --stop-daemon" %
                                                   self.extra_daemon_args)
            if self._is_galaxy_running():
                misc.run(stop_command)
            if not self._is_galaxy_running():
                log.debug(
                    "Galaxy not running; setting service state to SHUT_DOWN.")
                self.state = service_states.SHUT_DOWN
                self.last_state_change_time = datetime.utcnow()
                # Move all log files
                subprocess.call(
                    "bash -c 'for f in $GALAXY_HOME/{main,handler,manager,web}*.log; "
                    "do mv \"$f\" \"$f.%s\"; done'" %
                    datetime.utcnow().strftime('%H_%M'),
                    shell=True)
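
In the shutdown branch, log files are rotated with a bash one-liner that appends a timestamp to each `main`/`handler`/`manager`/`web` log. Below is a sketch of the same rename pass in Python, assuming `GALAXY_HOME` is set in the environment as the shell version expects.

    # Sketch of the log rotation done by the bash one-liner above: every
    # main/handler/manager/web log gets a timestamp suffix. GALAXY_HOME is
    # read from the environment, as the shell version assumes.
    import glob
    import os
    from datetime import datetime

    def rotate_galaxy_logs(galaxy_home=None):
        galaxy_home = galaxy_home or os.environ.get('GALAXY_HOME', '')
        suffix = datetime.utcnow().strftime('%H_%M')
        for prefix in ('main', 'handler', 'manager', 'web'):
            for log_path in glob.glob(os.path.join(galaxy_home,
                                                   prefix + '*.log')):
                os.rename(log_path, '{0}.{1}'.format(log_path, suffix))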
Example #16
    def manage_galaxy(self, to_be_started=True):
        """
        Use this method to start and stop the Galaxy application.

        :type to_be_started: bool
        :param to_be_started: If set, this method will attempt to start the
                              Galaxy application process. If not set, the
                              method will attempt to shut down the application
                              process.
        """
        log.debug("Using Galaxy from '{0}'".format(self.galaxy_home))
        os.putenv("GALAXY_HOME", self.galaxy_home)
        os.putenv("TEMP", self.app.path_resolver.galaxy_temp)
        os.putenv("TMPDIR", self.app.path_resolver.galaxy_temp)
        self.env_vars["GALAXY_HOME"] = self.galaxy_home
        self.env_vars["TEMP"] = self.app.path_resolver.galaxy_temp
        self.env_vars["TMPDIR"] = self.app.path_resolver.galaxy_temp
        conf_dir = self.option_manager.setup()
        if conf_dir:
            self.env_vars["GALAXY_UNIVERSE_CONFIG_DIR"] = conf_dir

        if self.multiple_processes():
            self.env_vars["GALAXY_RUN_ALL"] = "TRUE"
            # HACK: Galaxy has a known problem when starting from a fresh
            # configuration in multiple process mode. Each process attempts to
            # create the same directories and one or more processes can fail to
            # start because it "failed" to create said directories (because
            # another process created them first). This hack staggers
            # the process starts in an attempt to circumvent this problem.
            patch_run_sh_command = ("sudo sed -i -e \"s/server.log \\$\\@$/\\0; "
                                    "sleep 4/\" %s/run.sh" % self.galaxy_home)
            misc.run(patch_run_sh_command)
            self.extra_daemon_args = ""
        else:
            # Instead of sticking with default paster.pid and paster.log,
            # explicitly set pid and log file to ``main.pid`` and ``main.log``
            # to bring the single-process case in line with the defaults for the
            # multiple-process case (i.e. when GALAXY_RUN_ALL is set and
            # multiple servers are defined).
            self.extra_daemon_args = "--pid-file=main.pid --log-file=main.log"
        if to_be_started and self.remaining_start_attempts > 0:
            self.status()
            if not self.configured:
                log.debug("Setting up Galaxy application")
                # Set job manager configs if necessary
                for job_manager_svc in self.app.manager.service_registry.active(
                        service_role=ServiceRole.JOB_MANAGER):
                    if ServiceRole.SGE in job_manager_svc.svc_roles:
                        log.debug("Running on SGE; setting env_vars")
                        self.env_vars["SGE_ROOT"] = self.app.path_resolver.sge_root,
                        self.env_vars["DRMAA_LIBRARY_PATH"] = self.app.path_resolver.drmaa_library_path
                # Make sure Galaxy home dir exists
                if not os.path.exists(self.galaxy_home):
                    log.error("Galaxy application directory '%s' does not "
                              "exist! Aborting." % self.galaxy_home)
                    log.debug("ls /mnt/: %s" % os.listdir('/mnt/'))
                    self.state = service_states.ERROR
                    self.last_state_change_time = datetime.utcnow()
                    return False
                # Ensure the necessary directories exist
                for dir_name in [paths.P_GALAXY_INDICES,
                                 ('%s/tmp/job_working_directory' %
                                  self.app.path_resolver.galaxy_data)]:
                    misc.make_dir(dir_name, 'galaxy')
                self.configured = True
            if not self._is_galaxy_running():
                log.debug("Starting Galaxy...")
                self.update_galaxy_config()
                start_command = self.galaxy_run_command(
                    "%s --daemon" % self.extra_daemon_args)
                if misc.run(start_command):
                    self.remaining_start_attempts -= 1
                elif self.remaining_start_attempts > 0:
                    log.debug("It seems Galaxy failed to start; will atempt to "
                              "auto-restart (up to {0} more time(s))."
                              .format(self.remaining_start_attempts))
                    self.state = service_states.UNSTARTED
                    self.last_state_change_time = datetime.utcnow()
                else:
                    log.debug("It seems Galaxy failed to start; setting service "
                              "state to {0}.".format(service_states.ERROR))
                    self.state = service_states.ERROR
                    self.last_state_change_time = datetime.utcnow()
            else:
                log.debug("Galaxy already running.")
        else:
            log.info("Shutting down Galaxy...")
            self.state = service_states.SHUTTING_DOWN
            stop_command = self.galaxy_run_command(
                "%s --stop-daemon" % self.extra_daemon_args)
            if self._is_galaxy_running():
                misc.run(stop_command)
            if not self._is_galaxy_running():
                log.debug("Galaxy not running; setting service state to SHUT_DOWN.")
                self.state = service_states.SHUT_DOWN
                self.last_state_change_time = datetime.utcnow()
                # Move all log files
                subprocess.call("bash -c 'for f in $GALAXY_HOME/{main,handler,manager,web}*.log; "
                                "do mv \"$f\" \"$f.%s\"; done'" % datetime.utcnow()
                                .strftime('%H_%M'), shell=True)