Example #1
File: pss.py Project: jmchilton/cloudman
 def start(self):
     """ Wait until all other services are running before starting this one."""
     log.debug("Starting %s service" % self.name)
     # All other services OK, start this one now
     self.state = service_states.RUNNING
     log.debug("%s service prerequisites OK (i.e., all other services running), "
               "checking if %s was provided..." % (self.name, self.pss_filename))
     local_pss_file = os.path.join(
         self.app.ud['cloudman_home'], self.pss_filename)
     # Check user data first to allow overwriting of a potentially existing
     # script
     if self.pss_url:
         # This assumes the provided URL is readable to anyone w/o authentication
         # First check if the file actually exists
         if misc.run('wget --server-response %s' % self.pss_url):
             misc.run('wget --output-document=%s %s' % (
                 local_pss_file, self.pss_url))
         else:
             log.error(
                 "Specified post_start_script url (%s) does not exist" % self.pss_url)
     else:
         s3_conn = self.app.cloud_interface.get_s3_connection()
         b = None
         if s3_conn and 'bucket_cluster' in self.app.ud:
             b = s3_conn.lookup(self.app.ud['bucket_cluster'])
         if b is not None:  # Check if an existing cluster has a stored post start script
             log.debug("Cluster bucket '%s' found; looking for post start script '%s'"
                       % (b.name, self.pss_filename))
             misc.get_file_from_bucket(
                 s3_conn, b.name, self.pss_filename, local_pss_file)
     if os.path.exists(local_pss_file) and os.path.getsize(local_pss_file) > 0:
         log.info("%s found and saved to '%s'; running it now (note that this may take a while)"
                  % (self.pss_filename, os.path.join(self.app.ud['cloudman_home'], self.pss_filename)))
         os.chmod(local_pss_file, 0755)  # Ensure the script is executable
         misc.run('cd %s;./%s' % (self.app.ud[
                  'cloudman_home'], self.pss_filename))
         self.save_to_bucket()
         log.info("Done running {0}".format(self.pss_filename))
     else:
         log.debug("%s does not exist or could not be downloaded; continuing without running it."
                   % self.name)
     # Prime the object with instance data (because this may take a while
     # on some clouds, do so in a separate thread)
     threading.Thread(target=self._prime_data).start()
     self.state = service_states.SHUT_DOWN
     log.debug("%s service done and marked as '%s'" % (self.name, self.state))
     if self.instance_role == 'master':
         # On master, remove the service upon completion (PSS runs only
         # once)
         self.remove()
     self.state = service_states.COMPLETED
     # Once this service is complete, it's safe to assume the cluster is
     # READY
     self.app.manager.cluster_status = cluster_status.READY
     msg = "All cluster services started; the cluster is ready for use."
     log.info(msg)
     self.app.msgs.info(msg)
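
A note on the URL branch above: the code probes the URL with wget --server-response and relies on wget's exit status to decide whether the script exists before downloading it for real. Below is a self-contained sketch of the same probe-then-download pattern using only the standard library; the helper name is illustrative, --spider is swapped in as a lighter probe that skips the response body, and wget is assumed to be on the PATH:

import subprocess

def download_if_exists(url, local_path):
    # wget exits non-zero when the URL cannot be retrieved, so its exit
    # status doubles as an existence check (--spider saves nothing to disk)
    if subprocess.call(['wget', '--quiet', '--spider', url]) != 0:
        return False
    return subprocess.call(['wget', '--quiet',
                            '--output-document', local_path, url]) == 0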
Example #2
File: pss.py Project: martenson/cloudman
 def _fetch_script_from_bucket(self, script_name, target_path):
     # Try to download the pss from the cluster's bucket
     cluster_bucket_name = self.app.config['bucket_cluster']
     log.debug("Attempting to fetch script {0} from cluster bucket ({1})."
                   .format(script_name, cluster_bucket_name))
     s3_conn = self.app.cloud_interface.get_s3_connection()
     return misc.get_file_from_bucket(s3_conn, cluster_bucket_name,
                                      script_name, target_path)
Example #3
 def _fetch_script_from_bucket(self, script_name, target_path):
     # Try to download the pss from the cluster's bucket
     cluster_bucket_name = self.app.config['bucket_cluster']
     log.debug(
         "Attempting to fetch script {0} from cluster bucket ({1}).".format(
             script_name, cluster_bucket_name))
     s3_conn = self.app.cloud_interface.get_s3_connection()
     return misc.get_file_from_bucket(s3_conn, cluster_bucket_name,
                                      script_name, target_path)
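
Every example on this page funnels through misc.get_file_from_bucket. Judging from the call sites, its contract is roughly the following; this is a sketch written against boto's S3 API, inferred from usage rather than taken from CloudMan's actual implementation (the real helper also accepts a validate keyword, as seen in Example #5):

def get_file_from_bucket(s3_conn, bucket_name, remote_filename, local_filename):
    bucket = s3_conn.lookup(bucket_name)  # None if the bucket does not exist
    if bucket is None:
        return False
    key = bucket.get_key(remote_filename)  # None if the key does not exist
    if key is None:
        return False
    key.get_contents_to_filename(local_filename)
    return True  # callers treat the return value as download success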
Example #4
    def check_for_existing_volumes(self):
        """Check if there are any data volumes attached to the running
        instance. If yes, based on their
        """
        if self.app.TESTFLAG is True:
            return True
        s3_conn = self.app.cloud_interface.get_s3_connection()
        created_vols = ''
        attached_vols = ''
        idd_volumes = {}

        # Get existing volumes from respective files in current cluster's bucket
        # Check for attached volumes first because in the process we discover
        # created ones as well. If no record of attached volumes exists, check
        # for created ones.
        c_vols_file = 'created_volumes.txt'
        a_vols_file = 'attached_volumes.txt'
        if misc.get_file_from_bucket(s3_conn, self.app.ud['bucket_cluster'], a_vols_file, a_vols_file):
            f = open(a_vols_file, 'r')
            attached_vols = f.readlines()
            f.close()
            log.debug(
                "Retrieved following volumes potentially attached to current instance: %s" % attached_vols)
            for attached_vol in attached_vols:
                try:
                    # Each line of attached_vol must be formatted as follows:
                    # <file system name>@<volume ID>@<attached device ID>
                    vol_name = attached_vol.split('@')[0].strip()
                    vol_id = attached_vol.split('@')[1].strip()
                    dev_id = attached_vol.split('@')[2].strip()
                    vol_status = self.check_volume(vol_name, vol_id)
                    # If found vol does not exist, don't create reference to it
                    if vol_status is not None:
                        fs_status = self.check_file_system(vol_name, dev_id)
                        idd_volumes[vol_name] = [
                            vol_id, dev_id, vol_status, fs_status]
                except Exception, e:
                    log.error("Wrong format of line (%s) from attached volumes file. Exception: %s" %
                              (attached_vol, e))
            return idd_volumes
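
The loop above depends on each record following the <file system name>@<volume ID>@<attached device ID> format. A small standalone sketch of parsing one such record, with the validation pulled out of the try/except; the function name and sample values are illustrative:

def parse_attached_volume_record(line):
    fields = [field.strip() for field in line.split('@')]
    if len(fields) != 3:
        raise ValueError("Expected <fs>@<vol>@<dev>, got: %r" % line)
    return fields  # [vol_name, vol_id, dev_id]

# parse_attached_volume_record('galaxyData@vol-123abc@/dev/sdg')
# -> ['galaxyData', 'vol-123abc', '/dev/sdg']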
Example #5
    def __init__(self, **kwargs):
        print "Python version: ", sys.version_info[:2]
        self.PERSISTENT_DATA_VERSION = 3  # Current expected and generated PD version
        self.DEPLOYMENT_VERSION = 2
        # Instance persistent data file. This file gets created for
        # test/transient cluster types and stores the cluster config. In case
        # of a reboot, read the file to automatically recreate the services.
        self.INSTANCE_PD_FILE = '/mnt/persistent_data-current.yaml'
        cc = CloudConfig(app=self)
        # Get the type of cloud currently running on
        self.cloud_type = cc.get_cloud_type()
        # Create an appropriate cloud connection
        self.cloud_interface = cc.get_cloud_interface(self.cloud_type)
        # Read config file and check for errors
        self.config = config.Configuration(self, kwargs, self.cloud_interface.get_user_data())
        # From user data determine if object store (S3) should be used.
        self.use_object_store = self.config.get("use_object_store", True)
        # From user data determine if block storage (EBS/nova-volume) should be used.
        # (OpenNebula and dummy clouds do not support volumes yet so skip those)
        self.use_volumes = self.config.get(
            "use_volumes", self.cloud_type not in ['opennebula', 'dummy'])
#         self.config.init_with_user_data(self.ud)
        self.config.validate()
        # Setup logging
        self.logger = CMLogHandler()
        if "testflag" in self.config:
            self.TESTFLAG = bool(self.config['testflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.TESTFLAG = False
            self.logger.setLevel(logging.INFO)

        if "localflag" in self.config:
            self.LOCALFLAG = bool(self.config['localflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.LOCALFLAG = False
            self.logger.setLevel(logging.INFO)
        log.addHandler(self.logger)
        config.configure_logging(self.config)
        log.debug("Initializing app")
        log.debug("Running on '{0}' type of cloud in zone '{1}' using image '{2}'."
                  .format(self.cloud_type, self.cloud_interface.get_zone(),
                          self.cloud_interface.get_ami()))

        # App-wide object to store messages that need to travel between the back-end
        # and the UI.
        # TODO: Ideally, this should be stored some form of more persistent
        # medium (eg, database, file, session) and used as a simple module (vs. object)
        # but that's hopefully still forthcoming.
        self.msgs = messages.Messages()

        # App-wide consecutive number generator. Starting at 1, each time `next`
        # is called, get the next integer.
        self.number_generator = misc.get_a_number()

        # Check that we actually got user creds in user data and inform user
        if not ('access_key' in self.config or 'secret_key' in self.config):
            self.msgs.error("No access credentials provided in user data. "
                            "You will not be able to add any services.")
        # Update user data to include persistent data stored in cluster's bucket, if it exists
        # This enables cluster configuration to be recovered on cluster re-
        # instantiation
        self.manager = None
        pd = None
        if self.use_object_store and 'bucket_cluster' in self.config:
            log.debug("Looking for existing cluster persistent data (PD).")
            validate = (self.cloud_type == 'ec2')
            if not self.TESTFLAG and misc.get_file_from_bucket(
                    self.cloud_interface.get_s3_connection(),
                    self.config['bucket_cluster'],
                    'persistent_data.yaml', 'pd.yaml',
                    validate=validate):
                        log.debug("Loading bucket PD file pd.yaml")
                        pd = misc.load_yaml_file('pd.yaml')
        # Have not found the file in the cluster bucket, look on the instance
        if not pd:
            if os.path.exists(self.INSTANCE_PD_FILE):
                log.debug("Loading instance PD file {0}".format(self.INSTANCE_PD_FILE))
                pd = misc.load_yaml_file(self.INSTANCE_PD_FILE)
        if pd:
            self.config.user_data = misc.merge_yaml_objects(self.config.user_data, pd)
            self.config.user_data = misc.normalize_user_data(self, self.config.user_data)
        else:
            log.debug("No PD to go by. Setting deployment_version to {0}."
                      .format(self.DEPLOYMENT_VERSION))
            # This is a new cluster so default to the current deployment version
            self.config.user_data['deployment_version'] = self.DEPLOYMENT_VERSION
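
The recovery path above overlays the bucket's persistent data onto the current user data via misc.merge_yaml_objects. A minimal sketch of that kind of merge, assuming a recursive dict merge in which the overlay wins on conflicts (CloudMan's actual conflict policy may differ):

def merge_yaml_objects(base, overlay):
    merged = dict(base)
    for key, value in overlay.items():
        # Recurse into nested mappings; otherwise the overlay value wins
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = merge_yaml_objects(merged[key], value)
        else:
            merged[key] = value
    return merged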
Example #6
 def __init__( self, **kwargs ):
     print "Python version: ", sys.version_info[:2]
     cc = CloudConfig(app=self)
     # Get the type of cloud currently running on
     self.cloud_type = cc.get_cloud_type()
     # Create an appropriate cloud connection
     self.cloud_interface = cc.get_cloud_interface(self.cloud_type)
     # Load user data into a local field through a cloud interface
     self.ud = self.cloud_interface.get_user_data()
     # From user data determine if object store (S3) should be used.
     self.use_object_store = self.ud.get("use_object_store", True)
     # Read config file and check for errors
     self.config = config.Configuration( **kwargs )
     self.config.check()
     # Setup logging
     self.logger = CMLogHandler(self)
     if self.ud.has_key("testflag"):
         self.TESTFLAG = bool(self.ud['testflag'])
         self.logger.setLevel(logging.DEBUG)
     else:
         self.TESTFLAG = False
         self.logger.setLevel(logging.INFO)
     
     if self.ud.has_key("localflag"):
         self.LOCALFLAG = bool(self.ud['localflag'])
         self.logger.setLevel(logging.DEBUG)
     else:
         self.LOCALFLAG = False
         self.logger.setLevel(logging.INFO)
     log.addHandler(self.logger)
     config.configure_logging(self.config)
     log.debug( "Initializing app" )
     log.debug("Running on '{0}' type of cloud.".format(self.cloud_type))
     
     # App-wide object to store messages that need to travel between the back-end
     # and the UI. 
     # TODO: Ideally, this should be stored some form of more persistent
     # medium (eg, database, file, session) and used as a simple module (vs. object)
     # but that's hopefully still forthcoming.
     self.msgs = messages.Messages()
     
     # Check that we actually got user creds in user data and inform user
     if not ('access_key' in self.ud or 'secret_key' in self.ud):
         self.msgs.error("No access credentials provided in user data. "
             "You will not be able to add any services.")
     # Update user data to include persistent data stored in cluster's bucket, if it exists
     # This enables cluster configuration to be recovered on cluster re-instantiation
     self.manager = None
     if self.use_object_store and self.ud.has_key('bucket_cluster'):
         log.debug("Getting pd.yaml")
         if misc.get_file_from_bucket(self.cloud_interface.get_s3_connection(), self.ud['bucket_cluster'], 'persistent_data.yaml', 'pd.yaml'):
             pd = misc.load_yaml_file('pd.yaml')
             self.ud = misc.merge_yaml_objects(self.ud, pd)
     if self.ud.has_key('role'):
         if self.ud['role'] == 'master':
             log.info( "Master starting" )
             from cm.util import master
             self.manager = master.ConsoleManager(self)
         elif self.ud['role'] == 'worker':
             log.info( "Worker starting" )
             from cm.util import worker
             self.manager = worker.ConsoleManager(self)
         self.manager.console_monitor.start()
     else:
         log.error("************ No ROLE in %s - this is a fatal error. ************" % paths.USER_DATA_FILE)
Example #7
File: app.py Project: jmchilton/cloudman
    def __init__(self, **kwargs):
        print "Python version: ", sys.version_info[:2]
        self.PERSISTENT_DATA_VERSION = 3  # Current expected and generated PD version
        self.DEPLOYMENT_VERSION = 2
        cc = CloudConfig(app=self)
        # Get the type of cloud currently running on
        self.cloud_type = cc.get_cloud_type()
        # Create an appropriate cloud connection
        self.cloud_interface = cc.get_cloud_interface(self.cloud_type)
        # Load user data into a local field through a cloud interface
        self.ud = self.cloud_interface.get_user_data()
        # From user data determine if object store (S3) should be used.
        self.use_object_store = self.ud.get("use_object_store", True)
        # From user data determine if block storage (EBS/nova-volume) should be used.
        # (OpenNebula and dummy clouds do not support volumes yet so skip those)
        self.use_volumes = self.ud.get(
            "use_volumes", self.cloud_type not in ['opennebula', 'dummy'])
        # Read config file and check for errors
        self.config = config.Configuration(**kwargs)
        self.config.init_with_user_data(self.ud)
        self.config.check()
        # Setup logging
        self.logger = CMLogHandler(self)
        if "testflag" in self.ud:
            self.TESTFLAG = bool(self.ud['testflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.TESTFLAG = False
            self.logger.setLevel(logging.INFO)

        if "localflag" in self.ud:
            self.LOCALFLAG = bool(self.ud['localflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.LOCALFLAG = False
            self.logger.setLevel(logging.INFO)
        log.addHandler(self.logger)
        config.configure_logging(self.config, self.ud)
        log.debug("Initializing app")
        log.debug("Running on '{0}' type of cloud in zone '{1}' using image '{2}'."
                  .format(self.cloud_type, self.cloud_interface.get_zone(),
                          self.cloud_interface.get_ami()))

        # App-wide object to store messages that need to travel between the back-end
        # and the UI.
        # TODO: Ideally, this should be stored some form of more persistent
        # medium (eg, database, file, session) and used as a simple module (vs. object)
        # but that's hopefully still forthcoming.
        self.msgs = messages.Messages()

        # Check that we actually got user creds in user data and inform user
        if not ('access_key' in self.ud or 'secret_key' in self.ud):
            self.msgs.error("No access credentials provided in user data. "
                            "You will not be able to add any services.")
        # Update user data to include persistent data stored in cluster's bucket, if it exists
        # This enables cluster configuration to be recovered on cluster re-
        # instantiation
        self.manager = None
        if self.use_object_store and 'bucket_cluster' in self.ud:
            log.debug("Getting pd.yaml")
            if misc.get_file_from_bucket(self.cloud_interface.get_s3_connection(),
                                         self.ud['bucket_cluster'],
                                         'persistent_data.yaml', 'pd.yaml'):
                pd = misc.load_yaml_file('pd.yaml')
                self.ud = misc.merge_yaml_objects(self.ud, pd)
                self.ud = misc.normalize_user_data(self, self.ud)
            else:
                log.debug("Setting deployment_version to {0}".format(self.DEPLOYMENT_VERSION))
                # This is a new cluster so default to the current version
                self.ud['deployment_version'] = self.DEPLOYMENT_VERSION
Example #8
 def manage_galaxy( self, to_be_started=True ):
     if self.app.TESTFLAG is True and self.app.LOCALFLAG is False:
         log.debug( "Attempted to manage Galaxy, but TESTFLAG is set." )
         return
     os.putenv( "GALAXY_HOME", self.galaxy_home )
     os.putenv( "TEMP", '/mnt/galaxyData/tmp' )
     if to_be_started:
         self.status()
         if not self.configured:
             log.info( "Setting up Galaxy application" )
             s3_conn = None
             if self.app.use_object_store:
                 s3_conn = self.app.cloud_interface.get_s3_connection()
             if not os.path.exists(self.galaxy_home):
                 log.error("Galaxy application directory '%s' does not exist! Aborting." % self.galaxy_home)
                 log.debug("ls /mnt/: %s" % os.listdir('/mnt/'))
                 self.state = service_states.ERROR
                 self.last_state_change_time = datetime.utcnow()
                 return False
             # Retrieve config files from a persistent data repository (i.e., S3)
             if not misc.get_file_from_bucket( s3_conn, self.app.ud['bucket_cluster'], 'universe_wsgi.ini.cloud', self.galaxy_home + '/universe_wsgi.ini' ):
                 log.debug("Did not get Galaxy configuration file from cluster bucket '%s'" % self.app.ud['bucket_cluster'])
                 log.debug("Trying to retrieve latest one (universe_wsgi.ini.cloud) from '%s' bucket..." % self.app.ud['bucket_default'])
                 misc.get_file_from_bucket( s3_conn, self.app.ud['bucket_default'], 'universe_wsgi.ini.cloud', self.galaxy_home + '/universe_wsgi.ini' )
             self.add_galaxy_admin_users()
             self.add_dynamic_galaxy_options()
             universe_wsgi_path = os.path.join(self.galaxy_home, "universe_wsgi.ini")
             self._attempt_chown_galaxy_if_exists(universe_wsgi_path)
             if not misc.get_file_from_bucket( s3_conn, self.app.ud['bucket_cluster'], 'tool_conf.xml.cloud', self.galaxy_home + '/tool_conf.xml' ):
                 log.debug("Did not get Galaxy tool configuration file from cluster bucket '%s'" % self.app.ud['bucket_cluster'])
                 log.debug("Trying to retrieve latest one (tool_conf.xml.cloud) from '%s' bucket..." % self.app.ud['bucket_default'])
                 misc.get_file_from_bucket( s3_conn, self.app.ud['bucket_default'], 'tool_conf.xml.cloud', self.galaxy_home + '/tool_conf.xml' )
             tool_conf_path = os.path.join(self.galaxy_home, "tool_conf.xml")
             self._attempt_chown_galaxy_if_exists(tool_conf_path)
             if not misc.get_file_from_bucket( s3_conn, self.app.ud['bucket_cluster'], 'tool_data_table_conf.xml.cloud', self.galaxy_home + '/tool_data_table_conf.xml.cloud' ):
                 log.debug("Did not get Galaxy tool_data_table_conf.xml.cloud file from cluster bucket '%s'" % self.app.ud['bucket_cluster'])
                 log.debug("Trying to retrieve latest one (tool_data_table_conf.xml.cloud) from '%s' bucket..." % self.app.ud['bucket_default'])
                 misc.get_file_from_bucket( s3_conn, self.app.ud['bucket_default'], 'tool_data_table_conf.xml.cloud', self.galaxy_home + '/tool_data_table_conf.xml.cloud' )
             try:
                 tool_data_table_conf_path = os.path.join(self.galaxy_home, 'tool_data_table_conf.xml.cloud')
                 if os.path.exists(tool_data_table_conf_path):
                     shutil.copy(tool_data_table_conf_path, '%s/tool_data_table_conf.xml' % self.galaxy_home)
                     self._attempt_chown_galaxy(self.galaxy_home + '/tool_data_table_conf.xml')
             except:
                 pass
             # Make sure the temporary job_working_directory exists on user data volume (defined in universe_wsgi.ini.cloud)
             if not os.path.exists('%s/tmp/job_working_directory' % paths.P_GALAXY_DATA):
                 os.makedirs('%s/tmp/job_working_directory/' % paths.P_GALAXY_DATA)
             self._attempt_chown_galaxy('%s/tmp/job_working_directory/' % paths.P_GALAXY_DATA)
             # Setup environment for the FTP server and start it
             if not os.path.exists('%s/tmp/ftp' % paths.P_GALAXY_DATA):
                 os.makedirs('%s/tmp/ftp' % paths.P_GALAXY_DATA)
             misc.run('/etc/init.d/proftpd start', 'Failed to start FTP server', "Started FTP server")
             # TEMPORARY ONLY - UNTIL SAMTOOLS WRAPPER IS CONVERTED TO USE DATA TABLES
             if os.path.exists('/mnt/galaxyIndices/locfiles/sam_fa_indices.loc'):
                 shutil.copy('/mnt/galaxyIndices/locfiles/sam_fa_indices.loc', '%s/tool-data/sam_fa_indices.loc' % paths.P_GALAXY_HOME)
             # Ensure the environment is setup for running Galaxy
             # This can also be setup on the tools snapshot and thus avoid these patches
             # try:
             #     subprocess.call( "sed 's/cd `dirname $0`/cd `dirname $0`; export TEMP=\/mnt\/galaxyData\/tmp/; export DRMAA_LIBRARY_PATH=/opt/sge/lib/lx24-amd64/libdrmaa.so.1.0' %s/run.sh > %s/run.sh.custom" % (self.galaxy_home, self.galaxy_home), shell=True )
             #     misc.run("cd %s; sed 's/pyhton/python -ES/g' run.sh.custom > run.sh" % self.galaxy_home, "Failed to adjust run.sh", "Successfully adjusted run.sh")
             #     shutil.copy( self.galaxy_home + '/run.sh.custom', self.galaxy_home + '/run.sh' )
             #     os.chown( self.galaxy_home + '/run.sh', pwd.getpwnam( "galaxy" )[2], grp.getgrnam( "galaxy" )[2] )
             # except Exception, e:
             #     log.debug("Problem customizing Galaxy's run.sh: %s" % e)
             # try:
             #     misc.run("cd %s; sed 's/pyhton/python -ES/g' setup.sh > setup.sh.custom" % self.galaxy_home, "Failed to edit setup.sh", "Successfully adjusted setup.sh")
             #     shutil.copy( self.galaxy_home + '/setup.sh.custom', self.galaxy_home + '/setup.sh' )
             #     os.chown( self.galaxy_home + '/setup.sh', pwd.getpwnam( "galaxy" )[2], grp.getgrnam( "galaxy" )[2] )
             # except Exception, e:
             #     log.error("Error adjusting setup.sh: %s" % e)
             # subprocess.call( 'sed "s/#start_job_runners = pbs/start_job_runners = sge/" $GALAXY_HOME/universe_wsgi.ini > $GALAXY_HOME/universe_wsgi.ini.custom', shell=True )
             # shutil.move( self.galaxy_home + '/universe_wsgi.ini.custom', self.galaxy_home + '/universe_wsgi.ini' )
             # subprocess.call( 'sed "s/#default_cluster_job_runner = pbs:\/\/\//default_cluster_job_runner = sge:\/\/\//" $GALAXY_HOME/universe_wsgi.ini > $GALAXY_HOME/universe_wsgi.ini.custom', shell=True )
             # shutil.move( self.galaxy_home + '/universe_wsgi.ini.custom', self.galaxy_home + '/universe_wsgi.ini' )
             # Configure PATH in /etc/profile because otherwise some tools do not work
             # with open('/etc/profile', 'a') as f:
             #     f.write('export PATH=/mnt/galaxyTools/tools/bin:/mnt/galaxyTools/tools/pkg/fastx_toolkit_0.0.13:/mnt/galaxyTools/tools/pkg/bowtie-0.12.5:/mnt/galaxyTools/tools/pkg/samtools-0.1.7_x86_64-linux:/mnt/galaxyTools/tools/pkg/gnuplot-4.4.0/bin:/opt/PostgreSQL/8.4/bin:$PATH\n')
             # os.chown(self.galaxy_home + '/universe_wsgi.ini', pwd.getpwnam("galaxy")[2], grp.getgrnam("galaxy")[2])
             self.configured = True
             
         if self.state != service_states.RUNNING:
             log.info( "Starting Galaxy..." )
             # Make sure admin users get added
             self.add_galaxy_admin_users()
             log.debug('%s - galaxy -c "export SGE_ROOT=%s; sh $GALAXY_HOME/run.sh --daemon"' % (paths.P_SU, paths.P_SGE_ROOT))
             if not misc.run('%s - galaxy -c "export SGE_ROOT=%s; sh $GALAXY_HOME/run.sh --daemon"' % (paths.P_SU, paths.P_SGE_ROOT), "Error invoking Galaxy", "Successfully initiated Galaxy start."):
                 self.state = service_states.ERROR
                 self.last_state_change_time = datetime.utcnow()
         else:
             log.debug("Galaxy already running.")
     else:
         log.info( "Shutting down Galaxy..." )
         if misc.run('%s - galaxy -c "sh $GALAXY_HOME/run.sh --stop-daemon"' % paths.P_SU, "Error stopping Galaxy", "Successfully stopped Galaxy."):
             self.state = service_states.SHUT_DOWN
             self.last_state_change_time = datetime.utcnow()
             subprocess.call( 'mv $GALAXY_HOME/paster.log $GALAXY_HOME/paster.log.%s' % datetime.utcnow().strftime('%H_%M'), shell=True )
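
The configuration retrieval above repeats one pattern per file: try the cluster's own bucket first, then fall back to the project's default bucket (Example #10 below folds the per-file repetition into a loop). A sketch of that pattern, reusing misc and log the way the surrounding code does; the helper name is illustrative:

def get_config_file_with_fallback(s3_conn, cluster_bucket, default_bucket,
                                  remote_name, local_path):
    if misc.get_file_from_bucket(s3_conn, cluster_bucket, remote_name, local_path):
        return True
    log.debug("Did not get %s from cluster bucket '%s'; trying default bucket '%s'"
              % (remote_name, cluster_bucket, default_bucket))
    return misc.get_file_from_bucket(s3_conn, default_bucket, remote_name, local_path)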
Example #9
                    # Each line of attached_vol must be formatted as follows:
                    # <file system name>@<volume ID>@<attached device ID>
                    vol_name = attached_vol.split('@')[0].strip()
                    vol_id = attached_vol.split('@')[1].strip()
                    dev_id = attached_vol.split('@')[2].strip()
                    vol_status = self.check_volume(vol_name, vol_id)
                    # If found vol does not exist, don't create reference to it
                    if vol_status is not None:
                        fs_status = self.check_file_system(vol_name, dev_id)
                        # vol_size = misc.get_volume_size(ec2_conn, vol_id)
                        idd_volumes[vol_name] = [vol_id, dev_id, vol_status, fs_status]
                except Exception, e:
                    log.error("Wrong format of line (%s) from attached volumes file. Exception: %s" %
                              (attached_vol, e))
            return idd_volumes

        if misc.get_file_from_bucket(s3_conn, self.app.ud['bucket_cluster'], c_vols_file, c_vols_file):
            f = open(c_vols_file, 'r')
            created_vols = f.readlines()
            f.close()
            log.debug("Retrieved following volumes potentially created by/for current instance: %s" % created_vols)
            for created_vol in created_vols:
                try:
                    # Each line of created_vol must be formatted as follows:
                    # <file system name>@<volume ID>
                    vol_name = created_vol.split('@')[0].strip()
                    vol_id = created_vol.split('@')[1].strip()
                    vol_status = self.check_volume(vol_name, vol_id)
                    # If found vol does not exist, don't create reference to it
                    if vol_status is not None:
                        # No device ID is recorded for a created-but-unattached volume
                        idd_volumes[vol_name] = [vol_id, None, vol_status, None]
                except Exception, e:
                    log.error("Wrong format of line (%s) from created volumes file. Exception: %s" %
                              (created_vol, e))
        return idd_volumes
Example #10
    def manage_galaxy(self, to_be_started=True):
        if self.app.TESTFLAG is True and self.app.LOCALFLAG is False:
            log.debug("Attempted to manage Galaxy, but TESTFLAG is set.")
            return
        log.debug("Using Galaxy from '{0}'".format(self.galaxy_home))
        os.putenv("GALAXY_HOME", self.galaxy_home)
        os.putenv("TEMP", self.app.path_resolver.galaxy_temp)
        os.putenv("TMPDIR", self.app.path_resolver.galaxy_temp)
        self.env_vars["GALAXY_HOME"] = self.galaxy_home
        self.env_vars["TEMP"] = self.app.path_resolver.galaxy_temp
        self.env_vars["TMPDIR"] = self.app.path_resolver.galaxy_temp
        conf_dir = self.option_manager.setup()
        if conf_dir:
            self.env_vars["GALAXY_UNIVERSE_CONFIG_DIR"] = conf_dir

        if self._multiple_processes():
            self.env_vars["GALAXY_RUN_ALL"] = "TRUE"
            # HACK: Galaxy has a known problem when starting from a fresh configuration
            # in multiple process mode. Each process attempts to create the same directories
            # and one or more processes can fail to start because it "failed" to create
            # said directories (because another process created them first). This hack staggers
            # the process starts in an attempt to circumvent this problem.
            patch_run_sh_command = "sudo sed -i -e \"s/server.log \\$\\@$/\\0; sleep 4/\" %s/run.sh" % self.galaxy_home
            misc.run(patch_run_sh_command)
            self.extra_daemon_args = ""
        else:
            # Instead of sticking with default paster.pid and paster.log, explicitly
            # set pid and log file to ``main.pid`` and ``main.log`` to bring single
            # process case inline with defaults for the multiple process case (i.e.
            # when GALAXY_RUN_ALL is set and multiple servers are defined).
            self.extra_daemon_args = "--pid-file=main.pid --log-file=main.log"
        if to_be_started and self.remaining_start_attempts > 0:
            self.status()
            # If not provided as part of user data, update nginx conf with
            # current paths
            if self.app.ud.get('nginx_conf_contents', None) is None:
                self.configure_nginx()
            if not self.configured:
                log.debug("Setting up Galaxy application")
                s3_conn = self.app.cloud_interface.get_s3_connection()
                if not os.path.exists(self.galaxy_home):
                    log.error("Galaxy application directory '%s' does not exist! Aborting." %
                              self.galaxy_home)
                    log.debug("ls /mnt/: %s" % os.listdir('/mnt/'))
                    self.state = service_states.ERROR
                    self.last_state_change_time = datetime.utcnow()
                    return False
                # If a configuration file is not already in Galaxy's dir,
                # retrieve it from a persistent data repository (i.e., S3)
                if s3_conn:
                    for f_name in ['universe_wsgi.ini',
                                   'tool_conf.xml',
                                   'tool_data_table_conf.xml',
                                   'shed_tool_conf.xml',
                                   'datatypes_conf.xml',
                                   'shed_tool_data_table_conf.xml']:
                        f_path = os.path.join(self.galaxy_home, f_name)
                        if not os.path.exists(f_path):
                            if not misc.get_file_from_bucket(s3_conn, self.app.ud['bucket_cluster'],
                                    '{0}.cloud'.format(f_name), f_path):
                                # We did not get the config file from cluster's
                                # bucket so get it from the default bucket
                                log.debug("Did not get Galaxy configuration file " +
                                          "'{0}' from cluster bucket '{1}'"
                                          .format(f_name, self.app.ud['bucket_cluster']))
                                log.debug("Trying to retrieve one ({0}.cloud) "
                                          "from the default '{1}' bucket."
                                          .format(f_name, self.app.ud['bucket_default']))
                                local_file = os.path.join(self.galaxy_home, f_name)
                                misc.get_file_from_bucket(s3_conn,
                                    self.app.ud['bucket_default'], '{0}.cloud'.format(f_name),
                                    local_file)
                                attempt_chown_galaxy_if_exists(local_file)

                # Make sure the temporary job_working_directory exists on user
                # data volume (defined in universe_wsgi.ini.cloud)
                if not os.path.exists('%s/tmp/job_working_directory' % self.app.path_resolver.galaxy_data):
                    os.makedirs('%s/tmp/job_working_directory/' % self.app.path_resolver.galaxy_data)
                attempt_chown_galaxy('%s/tmp/job_working_directory/' % self.app.path_resolver.galaxy_data)
                # Make sure the default shed_tools directory exists
                if not os.path.exists('%s/../shed_tools' % self.app.path_resolver.galaxy_data):
                    os.makedirs('%s/../shed_tools/' % self.app.path_resolver.galaxy_data)
                attempt_chown_galaxy('%s/../shed_tools/' % self.app.path_resolver.galaxy_data)
                # TEMPORARY ONLY - UNTIL SAMTOOLS WRAPPER IS CONVERTED TO USE
                # DATA TABLES
                if os.path.exists('/mnt/galaxyIndices/locfiles/sam_fa_indices.loc'):
                    shutil.copy(
                        '/mnt/galaxyIndices/locfiles/sam_fa_indices.loc',
                        '%s/tool-data/sam_fa_indices.loc' % self.galaxy_home)
                # Ensure the environment is setup for running Galaxy
                # This can also be setup on the tools snapshot and thus avoid these patches
                # try:
                #     subprocess.call( "sed 's/cd `dirname $0`/cd `dirname $0`; export TEMP=\/mnt\/galaxyData\/tmp/; export DRMAA_LIBRARY_PATH=/opt/sge/lib/lx24-amd64/libdrmaa.so.1.0' %s/run.sh > %s/run.sh.custom" % (self.galaxy_home, self.galaxy_home), shell=True )
                #     misc.run("cd %s; sed 's/python/python -ES/g' run.sh.custom > run.sh" % self.galaxy_home, "Failed to adjust run.sh", "Successfully adjusted run.sh")
                #     shutil.copy( self.galaxy_home + '/run.sh.custom', self.galaxy_home + '/run.sh' )
                #     os.chown( self.galaxy_home + '/run.sh', pwd.getpwnam( "galaxy" )[2], grp.getgrnam( "galaxy" )[2] )
                # except Exception, e:
                #     log.debug("Problem customizing Galaxy's run.sh: %s" % e)
                # try:
                #     misc.run("cd %s; sed 's/pyhton/python -ES/g' setup.sh > setup.sh.custom" % self.galaxy_home, "Failed to edit setup.sh", "Successfully adjusted setup.sh")
                #     shutil.copy( self.galaxy_home + '/setup.sh.custom', self.galaxy_home + '/setup.sh' )
                #     os.chown( self.galaxy_home + '/setup.sh', pwd.getpwnam( "galaxy" )[2], grp.getgrnam( "galaxy" )[2] )
                # except Exception, e:
                #     log.error("Error adjusting setup.sh: %s" % e)
                # subprocess.call( 'sed "s/#start_job_runners = pbs/start_job_runners = sge/" $GALAXY_HOME/universe_wsgi.ini > $GALAXY_HOME/universe_wsgi.ini.custom', shell=True )
                # shutil.move( self.galaxy_home + '/universe_wsgi.ini.custom', self.galaxy_home + '/universe_wsgi.ini' )
                # subprocess.call( 'sed "s/#default_cluster_job_runner = pbs:\/\/\//default_cluster_job_runner = sge:\/\/\//" $GALAXY_HOME/universe_wsgi.ini > $GALAXY_HOME/universe_wsgi.ini.custom', shell=True )
                # shutil.move( self.galaxy_home + '/universe_wsgi.ini.custom', self.galaxy_home + '/universe_wsgi.ini' )
                # Configure PATH in /etc/profile because otherwise some tools do not work
                # with open('/etc/profile', 'a') as f:
                #     f.write('export PATH=/mnt/galaxyTools/tools/bin:/mnt/galaxyTools/tools/pkg/fastx_toolkit_0.0.13:/mnt/galaxyTools/tools/pkg/bowtie-0.12.5:/mnt/galaxyTools/tools/pkg/samtools-0.1.7_x86_64-linux:/mnt/galaxyTools/tools/pkg/gnuplot-4.4.0/bin:/opt/PostgreSQL/8.4/bin:$PATH\n')
                # os.chown(self.galaxy_home + '/universe_wsgi.ini',
                # pwd.getpwnam("galaxy")[2], grp.getgrnam("galaxy")[2])
                self.remaining_start_attempts -= 1
                self.configured = True
            if self.state != service_states.RUNNING:
                log.debug("Starting Galaxy...")
                # Make sure admin users get added
                self.update_galaxy_config()
                start_command = self.galaxy_run_command(
                    "%s --daemon" % self.extra_daemon_args)
                log.debug(start_command)
                if not misc.run(start_command, "Error invoking Galaxy",
                        "Successfully initiated Galaxy start from {0}.".format(self.galaxy_home)):
                    if self.remaining_start_attempts > 0:
                        self.state = service_states.UNSTARTED
                        self.last_state_change_time = datetime.utcnow()
                    else:
                        self.state = service_states.ERROR
                        self.last_state_change_time = datetime.utcnow()
            else:
                log.debug("Galaxy already running.")
        else:
            log.info("Shutting down Galaxy...")
            self.state = service_states.SHUTTING_DOWN
            stop_command = self.galaxy_run_command("%s --stop-daemon" % self.extra_daemon_args)
            if misc.run(stop_command):
                self.state = service_states.SHUT_DOWN
                self.last_state_change_time = datetime.utcnow()
                # Move all log files
                subprocess.call("bash -c 'for f in $GALAXY_HOME/{main,handler,manager,web}*.log; do mv \"$f\" \"$f.%s\"; done'"
                    % datetime.utcnow().strftime('%H_%M'), shell=True)
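
The final subprocess.call renames every main/handler/manager/web log with an HH_MM suffix. A standalone sketch of the same rotation using only the standard library; the function name and the assumption that the logs sit directly under galaxy_home are illustrative:

import glob
import os
from datetime import datetime

def rotate_galaxy_logs(galaxy_home):
    suffix = datetime.utcnow().strftime('%H_%M')
    for prefix in ('main', 'handler', 'manager', 'web'):
        for log_file in glob.glob(os.path.join(galaxy_home, '%s*.log' % prefix)):
            os.rename(log_file, '%s.%s' % (log_file, suffix))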