Example 1
 def index( self, trans, **kwd ):
     """
     GET /api/datatypes
     Return an object containing upload datatypes.
     """
     datatypes_registry = self._datatypes_registry
     extension_only = asbool( kwd.get( 'extension_only', True ) )
     upload_only = asbool( kwd.get( 'upload_only', True ) )
     try:
         if extension_only:
             if upload_only:
                 return datatypes_registry.upload_file_formats
             else:
                 return [ ext for ext in datatypes_registry.datatypes_by_extension ]
         else:
             rval = []
             for elem in datatypes_registry.datatype_elems:
                 if not asbool(elem.get('display_in_upload')) and upload_only:
                     continue
                 keys = ['extension', 'description', 'description_url']
                 dictionary = {}
                 for key in keys:
                     dictionary[key] = elem.get(key)
                 rval.append(dictionary)
             return rval
     except Exception as exception:
         log.error( 'could not get datatypes: %s', str( exception ), exc_info=True )
         if not isinstance( exception, exceptions.MessageException ):
             raise exceptions.InternalServerError( str( exception ) )
         else:
             raise
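Every example in this collection normalizes loosely typed request or configuration flags with asbool. For orientation, the following is a minimal sketch of the conversion such a helper performs, assuming paste.deploy-style semantics; the exact tokens accepted by galaxy.util.asbool may differ.

def asbool_sketch(obj):
    # Assumed semantics, not the verbatim galaxy.util implementation.
    if isinstance(obj, str):
        token = obj.strip().lower()
        if token in ('true', 'yes', 'on', 'y', 't', '1'):
            return True
        if token in ('false', 'no', 'off', 'n', 'f', '0'):
            return False
        raise ValueError("String is not true/false: %r" % obj)
    return bool(obj)

# asbool_sketch('True') -> True; asbool_sketch('0') -> False; asbool_sketch(None) -> False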
Example 2
 def requires_prior_installation_of( self ):
     """
     Return a list of repository dependency tuples like (tool_shed, name, owner, changeset_revision, prior_installation_required) for this
     repository's repository dependencies where prior_installation_required is True.  By definition, repository dependencies are required to
     be installed in order for this repository to function correctly.  However, those repository dependencies that are defined for this
     repository with prior_installation_required set to True place them in a special category in that the required repositories must be
     installed before this repository is installed.  Among other things, this enables these "special" repository dependencies to include
     information that enables the successful installation of this repository.  This method is not used during the initial installation of
     this repository, but only after it has been installed (metadata must be set for this repository in order for this method to be useful).
     """
     required_rd_tups_that_must_be_installed = []
     if self.has_repository_dependencies:
         rd_tups = self.metadata[ 'repository_dependencies' ][ 'repository_dependencies' ]
         for rd_tup in rd_tups:
             if len( rd_tup ) == 5:
                 tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
                     common_util.parse_repository_dependency_tuple( rd_tup, contains_error=False )
                 if asbool( prior_installation_required ):
                     required_rd_tups_that_must_be_installed.append( ( tool_shed, name, owner, changeset_revision, 'True', 'False' ) )
             elif len( rd_tup ) == 6:
                 tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
                     common_util.parse_repository_dependency_tuple( rd_tup, contains_error=False )
                 # The repository dependency will only be required to be previously installed if it does not fall into the category of
                 # a repository that must be installed only so that its contained tool dependency can be used for compiling the tool
                 # dependency of the dependent repository.
                 if not asbool( only_if_compiling_contained_td ):
                     if asbool( prior_installation_required ):
                         required_rd_tups_that_must_be_installed.append( ( tool_shed, name, owner, changeset_revision, 'True', 'False' ) )
     return required_rd_tups_that_must_be_installed
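For orientation, here is a hypothetical 6-component dependency tuple (all values invented) and what the method above would collect from it.

rd_tup = ['https://toolshed.example.org', 'package_zlib_1_2', 'iuc', 'abc123', 'True', 'False']
# prior_installation_required is 'True' and only_if_compiling_contained_td is 'False',
# so requires_prior_installation_of() would append
# ('https://toolshed.example.org', 'package_zlib_1_2', 'iuc', 'abc123', 'True', 'False')
# to the returned list.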
Example 3
 def index(self, trans, **kwd):
     """
     GET /api/datatypes
     Return an object containing upload datatypes.
     """
     datatypes_registry = self._datatypes_registry
     try:
         extension_only = asbool(kwd.get('extension_only', True))
         upload_only = asbool(kwd.get('upload_only', True))
         if extension_only:
             if upload_only:
                 return datatypes_registry.upload_file_formats
             else:
                 return [ext for ext in datatypes_registry.datatypes_by_extension]
         else:
             rval = []
             for datatype_info_dict in datatypes_registry.datatype_info_dicts:
                 if not datatype_info_dict.get('display_in_upload') and upload_only:
                     continue
                 rval.append(datatype_info_dict)
             return rval
     except Exception as exception:
         log.error('could not get datatypes: %s', str(exception), exc_info=True)
         if not isinstance(exception, exceptions.MessageException):
             raise exceptions.InternalServerError(str(exception))
         else:
             raise
Example 4
 def __init__( self, **kwargs ):
     pattern = kwargs.get( "pattern", "__default__" )
     if pattern in NAMED_PATTERNS:
         pattern = NAMED_PATTERNS.get( pattern )
     self.pattern = pattern
     self.default_dbkey = kwargs.get( "dbkey", INPUT_DBKEY_TOKEN )
     self.default_ext = kwargs.get( "ext", None )
     if self.default_ext is None and "format" in kwargs:
         self.default_ext = kwargs.get( "format" )
     self.default_visible = asbool( kwargs.get( "visible", None ) )
     self.directory = kwargs.get( "directory", None )
     self.assign_primary_output = asbool( kwargs.get( 'assign_primary_output', False ) )
     sort_by = kwargs.get( "sort_by", DEFAULT_SORT_BY )
     if sort_by.startswith("reverse_"):
         self.sort_reverse = True
         sort_by = sort_by[len("reverse_"):]
     else:
         self.sort_reverse = False
     if "_" in sort_by:
         sort_comp, sort_by = sort_by.split("_", 1)
         assert sort_comp in ["lexical", "numeric"]
     else:
         sort_comp = DEFAULT_SORT_COMP
     assert sort_by in [
         "filename",
         "name",
         "designation",
         "dbkey"
     ]
     self.sort_key = sort_by
     self.sort_comp = sort_comp
 def index( self, trans, **kwd ):
     """
     GET /api/datatypes
     Return an object containing upload datatypes.
     """
     extension_only = asbool( kwd.get( 'extension_only', True ) )
     upload_only = asbool( kwd.get( 'upload_only', True ) )
     try:
         if extension_only:
             if upload_only:
                 return trans.app.datatypes_registry.upload_file_formats
             else:
                 return [ ext for ext in trans.app.datatypes_registry.datatypes_by_extension ]
         else:
             rval = []
             for elem in trans.app.datatypes_registry.datatype_elems:
                 if not asbool(elem.get('display_in_upload')) and upload_only:
                     continue
                 keys = ['extension', 'description', 'description_url']
                 dictionary = {}
                 for key in keys:
                     dictionary[key] = elem.get(key)
                 rval.append(dictionary)
             return rval
     except Exception as exception:
         log.error( 'could not get datatypes: %s', str( exception ), exc_info=True )
         trans.response.status = 500
         return { 'error': str( exception ) }
Example 6
    def containerize_command(self, command):
        def prop(name, default):
            destination_name = "docker_%s" % name
            return self.destination_info.get(destination_name, default)

        env_directives = []
        for pass_through_var in self.tool_info.env_pass_through:
            env_directives.append('"%s=$%s"' % (pass_through_var, pass_through_var))

        # Allow destinations to explicitly set environment variables just for
        # docker container. Better approach is to set for destination and then
        # pass through only what tool needs however. (See todo in ToolInfo.)
        for key, value in six.iteritems(self.destination_info):
            if key.startswith("docker_env_"):
                env = key[len("docker_env_"):]
                env_directives.append('"%s=%s"' % (env, value))

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception("Cannot containerize command [%s] without defined working directory." % working_directory)

        volumes_raw = self._expand_volume_str(self.destination_info.get("docker_volumes", "$defaults"))
        preprocessed_volumes_list = preprocess_volumes(volumes_raw, self.container_type)
        # TODO: Remove redundant volumes...
        volumes = [DockerVolume.from_str(v) for v in preprocessed_volumes_list]
        # If a tool definitely has a temp directory available set it to /tmp in container for compat.
        # with CWL. This is part of that spec and should make it easier to share containers between CWL
        # and Galaxy.
        if self.job_info.tmp_directory is not None:
            volumes.append(docker_util.DockerVolume.volume_from_str("%s:/tmp:rw" % self.job_info.tmp_directory))
        volumes_from = self.destination_info.get("docker_volumes_from", docker_util.DEFAULT_VOLUMES_FROM)

        docker_host_props = dict(
            docker_cmd=prop("cmd", docker_util.DEFAULT_DOCKER_COMMAND),
            sudo=asbool(prop("sudo", docker_util.DEFAULT_SUDO)),
            sudo_cmd=prop("sudo_cmd", docker_util.DEFAULT_SUDO_COMMAND),
            host=prop("host", docker_util.DEFAULT_HOST),
        )

        cached_image_file = self.__get_cached_image_file()
        if not cached_image_file:
            # TODO: Add option to cache it once here and create cached_image_file.
            cache_command = docker_util.build_docker_cache_command(self.container_id, **docker_host_props)
        else:
            cache_command = self.__cache_from_file_command(cached_image_file, docker_host_props)
        run_command = docker_util.build_docker_run_command(
            command,
            self.container_id,
            volumes=volumes,
            volumes_from=volumes_from,
            env_directives=env_directives,
            working_directory=working_directory,
            net=prop("net", "none"),  # By default, docker instance has networking disabled
            auto_rm=asbool(prop("auto_rm", docker_util.DEFAULT_AUTO_REMOVE)),
            set_user=prop("set_user", docker_util.DEFAULT_SET_USER),
            run_extra_arguments=prop("run_extra_arguments", docker_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
            **docker_host_props
        )
        return "%s\n%s" % (cache_command, run_command)
Example 7
    def queue_job( self, job_wrapper ):
        # prepare the job
        include_metadata = asbool( job_wrapper.job_destination.params.get( "embed_metadata_in_job", DEFAULT_EMBED_METADATA_IN_JOB ) )
        if not self.prepare_job( job_wrapper, include_metadata=include_metadata ):
            return

        stderr = stdout = ''
        exit_code = 0

        # command line has been added to the wrapper by prepare_job()
        command_line, exit_code_path = self.__command_line( job_wrapper )
        job_id = job_wrapper.get_id_tag()

        try:
            stdout_file = tempfile.NamedTemporaryFile( suffix='_stdout', dir=job_wrapper.working_directory )
            stderr_file = tempfile.NamedTemporaryFile( suffix='_stderr', dir=job_wrapper.working_directory )
            log.debug( '(%s) executing job script: %s' % ( job_id, command_line ) )
            proc = subprocess.Popen( args=command_line,
                                     shell=True,
                                     cwd=job_wrapper.working_directory,
                                     stdout=stdout_file,
                                     stderr=stderr_file,
                                     env=self._environ,
                                     preexec_fn=os.setpgrp )
            job_wrapper.set_job_destination(job_wrapper.job_destination, proc.pid)
            job_wrapper.change_state( model.Job.states.RUNNING )

            terminated = self.__poll_if_needed( proc, job_wrapper, job_id )
            if terminated:
                return

            # Reap the process and get the exit code.
            exit_code = proc.wait()
            try:
                exit_code = int( open( exit_code_path, 'r' ).read() )
            except Exception:
                log.warn( "Failed to read exit code from path %s" % exit_code_path )
                pass
            stdout_file.seek( 0 )
            stderr_file.seek( 0 )
            stdout = shrink_stream_by_size( stdout_file, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
            stderr = shrink_stream_by_size( stderr_file, DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
            stdout_file.close()
            stderr_file.close()
            log.debug('execution finished: %s' % command_line)
        except Exception:
            log.exception("failure running job %d" % job_wrapper.job_id)
            job_wrapper.fail( "failure running job", exception=True )
            return
        external_metadata = not asbool( job_wrapper.job_destination.params.get( "embed_metadata_in_job", DEFAULT_EMBED_METADATA_IN_JOB ) )
        if external_metadata:
            self._handle_metadata_externally( job_wrapper, resolve_requirements=True )
        # Finish the job!
        try:
            job_wrapper.finish( stdout, stderr, exit_code )
        except:
            log.exception("Job wrapper finish method failed")
            job_wrapper.fail("Unable to finish job", exception=True)
Example 8
 def __init__(self, **kwargs):
     self.default_dbkey = kwargs.get("dbkey", INPUT_DBKEY_TOKEN)
     self.default_ext = kwargs.get("ext", None)
     if self.default_ext is None and "format" in kwargs:
         self.default_ext = kwargs.get("format")
     self.default_visible = asbool(kwargs.get("visible", None))
     self.assign_primary_output = asbool(kwargs.get('assign_primary_output', False))
     self.directory = kwargs.get("directory", None)
     self.recurse = False
 def index( self, trans, **kwd ):
     """
     GET /api/repository_revisions
     Displays a collection (list) of repository revisions.
     """
     # Example URL: http://localhost:9009/api/repository_revisions
     repository_metadata_dicts = []
     # Build up an anded clause list of filters.
     clause_list = []
     # Filter by downloadable if received.
     downloadable =  kwd.get( 'downloadable', None )
     if downloadable is not None:
         clause_list.append( trans.model.RepositoryMetadata.table.c.downloadable == util.asbool( downloadable ) )
     # Filter by malicious if received.
     malicious =  kwd.get( 'malicious', None )
     if malicious is not None:
         clause_list.append( trans.model.RepositoryMetadata.table.c.malicious == util.asbool( malicious ) )
     # Filter by tools_functionally_correct if received.
     tools_functionally_correct = kwd.get( 'tools_functionally_correct', None )
     if tools_functionally_correct is not None:
         clause_list.append( trans.model.RepositoryMetadata.table.c.tools_functionally_correct == util.asbool( tools_functionally_correct ) )
     # Filter by missing_test_components if received.
     missing_test_components = kwd.get( 'missing_test_components', None )
     if missing_test_components is not None:
         clause_list.append( trans.model.RepositoryMetadata.table.c.missing_test_components == util.asbool( missing_test_components ) )
     # Filter by do_not_test if received.
     do_not_test = kwd.get( 'do_not_test', None )
     if do_not_test is not None:
         clause_list.append( trans.model.RepositoryMetadata.table.c.do_not_test == util.asbool( do_not_test ) )
     # Filter by includes_tools if received.
     includes_tools = kwd.get( 'includes_tools', None )
     if includes_tools is not None:
         clause_list.append( trans.model.RepositoryMetadata.table.c.includes_tools == util.asbool( includes_tools ) )
     # Filter by test_install_error if received.
     test_install_error = kwd.get( 'test_install_error', None )
     if test_install_error is not None:
         clause_list.append( trans.model.RepositoryMetadata.table.c.test_install_error == util.asbool( test_install_error ) )
     # Filter by skip_tool_test if received.
     skip_tool_test = kwd.get( 'skip_tool_test', None )
     if skip_tool_test is not None:
         skip_tool_test = util.asbool( skip_tool_test )
         skipped_metadata_ids_subquery = select( [ trans.app.model.SkipToolTest.table.c.repository_metadata_id ] )
         if skip_tool_test:
             clause_list.append( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) )
         else:
             clause_list.append( not_( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) ) )
     for repository_metadata in trans.sa_session.query( trans.app.model.RepositoryMetadata ) \
                                                .filter( and_( *clause_list ) ) \
                                                .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id.desc() ):
         repository_metadata_dict = repository_metadata.to_dict( view='collection',
                                                                 value_mapper=self.__get_value_mapper( trans ) )
         repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',
                                                          action='show',
                                                          id=trans.security.encode_id( repository_metadata.id ) )
         repository_metadata_dicts.append( repository_metadata_dict )
     return repository_metadata_dicts
Example 10
 def __init__( self, **kwargs ):
     pattern = kwargs.get( "pattern", "__default__" )
     if pattern in NAMED_PATTERNS:
         pattern = NAMED_PATTERNS.get( pattern )
     self.pattern = pattern
     self.default_dbkey = kwargs.get( "dbkey", None )
     self.default_ext = kwargs.get( "ext", None )
     self.default_visible = util.asbool( kwargs.get( "visible", None ) )
     self.directory = kwargs.get( "directory", None )
     self.assign_primary_output = util.asbool( kwargs.get( 'assign_primary_output', False ) )
Example 11
def headless_selenium():
    if asbool(GALAXY_TEST_SELENIUM_REMOTE):
        return False

    if GALAXY_TEST_SELENIUM_HEADLESS == "auto":
        if driver_factory.is_virtual_display_available():
            return True
        else:
            return False
    else:
        return asbool(GALAXY_TEST_SELENIUM_HEADLESS)
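The GALAXY_TEST_SELENIUM_* flags checked above are presumably read once from the environment as plain strings and only normalized with asbool at the point of use; a sketch of that assumption (not the verbatim module constants):

import os

GALAXY_TEST_SELENIUM_REMOTE = os.environ.get("GALAXY_TEST_SELENIUM_REMOTE", "false")
GALAXY_TEST_SELENIUM_HEADLESS = os.environ.get("GALAXY_TEST_SELENIUM_HEADLESS", "auto")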
Example 12
    def containerize_command(self, command):
        def prop(name, default):
            destination_name = "docker_%s" % name
            return self.destination_info.get(destination_name, default)

        env_directives = []
        for pass_through_var in self.tool_info.env_pass_through:
            env_directives.append('"%s=$%s"' % (pass_through_var, pass_through_var))

        # Allow destinations to explicitly set environment variables just for
        # docker container. Better approach is to set for destination and then
        # pass through only what tool needs however. (See todo in ToolInfo.)
        for key, value in self.destination_info.iteritems():
            if key.startswith("docker_env_"):
                env = key[len("docker_env_"):]
                env_directives.append('"%s=%s"' % (env, value))

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception("Cannot containerize command [%s] without defined working directory." % working_directory)

        volumes_raw = self.__expand_str(self.destination_info.get("docker_volumes", "$defaults"))
        # TODO: Remove redundant volumes...
        volumes = docker_util.DockerVolume.volumes_from_str(volumes_raw)
        volumes_from = self.destination_info.get("docker_volumes_from", docker_util.DEFAULT_VOLUMES_FROM)

        docker_host_props = dict(
            docker_cmd=prop("cmd", docker_util.DEFAULT_DOCKER_COMMAND),
            sudo=asbool(prop("sudo", docker_util.DEFAULT_SUDO)),
            sudo_cmd=prop("sudo_cmd", docker_util.DEFAULT_SUDO_COMMAND),
            host=prop("host", docker_util.DEFAULT_HOST),
        )

        cached_image_file = self.__get_cached_image_file()
        if not cached_image_file:
            # TODO: Add option to cache it once here and create cached_image_file.
            cache_command = docker_util.build_docker_cache_command(self.container_id, **docker_host_props)
        else:
            cache_command = self.__cache_from_file_command(cached_image_file, docker_host_props)
        run_command = docker_util.build_docker_run_command(
            command,
            self.container_id,
            volumes=volumes,
            volumes_from=volumes_from,
            env_directives=env_directives,
            working_directory=working_directory,
            net=prop("net", "none"),  # By default, docker instance has networking disabled
            auto_rm=asbool(prop("auto_rm", docker_util.DEFAULT_AUTO_REMOVE)),
            set_user=asbool(prop("set_user", docker_util.DEFAULT_SET_USER)),
            **docker_host_props
        )
        return "%s\n%s" % (cache_command, run_command)
Example 13
 def build_invalid_repository_dependencies_root_folder( self, folder_id, invalid_repository_dependencies_dict ):
     """Return a folder hierarchy containing invalid repository dependencies."""
     label = 'Invalid repository dependencies'
     if invalid_repository_dependencies_dict:
         invalid_repository_dependency_id = 0
         folder_id += 1
         invalid_repository_dependencies_root_folder = \
             utility_container_manager.Folder( id=folder_id,
                                               key='root',
                                               label='root',
                                               parent=None )
         folder_id += 1
         invalid_repository_dependencies_folder = \
             utility_container_manager.Folder( id=folder_id,
                                               key='invalid_repository_dependencies',
                                               label=label,
                                               parent=invalid_repository_dependencies_root_folder )
         invalid_repository_dependencies_root_folder.folders.append( invalid_repository_dependencies_folder )
         invalid_repository_dependencies = invalid_repository_dependencies_dict[ 'repository_dependencies' ]
         for invalid_repository_dependency in invalid_repository_dependencies:
             folder_id += 1
             invalid_repository_dependency_id += 1
             toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td, error = \
                 common_util.parse_repository_dependency_tuple( invalid_repository_dependency, contains_error=True )
             key = container_util.generate_repository_dependencies_key_for_repository( toolshed,
                                                                                       name,
                                                                                       owner,
                                                                                       changeset_revision,
                                                                                       prior_installation_required,
                                                                                       only_if_compiling_contained_td )
             label = "Repository <b>%s</b> revision <b>%s</b> owned by <b>%s</b>" % ( name, changeset_revision, owner )
             folder = utility_container_manager.Folder( id=folder_id,
                                                        key=key,
                                                        label=label,
                                                        parent=invalid_repository_dependencies_folder )
             ird = InvalidRepositoryDependency( id=invalid_repository_dependency_id,
                                                toolshed=toolshed,
                                                repository_name=name,
                                                repository_owner=owner,
                                                changeset_revision=changeset_revision,
                                                prior_installation_required=util.asbool( prior_installation_required ),
                                                only_if_compiling_contained_td=util.asbool( only_if_compiling_contained_td ),
                                                error=error )
             folder.invalid_repository_dependencies.append( ird )
             invalid_repository_dependencies_folder.folders.append( folder )
     else:
         invalid_repository_dependencies_root_folder = None
     return folder_id, invalid_repository_dependencies_root_folder
Example 14
    def show(self, trans, id, **kwd):
        """
        show( trans, id )
        * GET /api/jobs/{id}:
            return jobs for current user

        :type   id: string
        :param  id: Specific job id

        :type   full: boolean
        :param  full: whether to return extra information

        :rtype:     dictionary
        :returns:   dictionary containing full description of job data
        """
        job = self.__get_job(trans, id)
        is_admin = trans.user_is_admin()
        job_dict = self.encode_all_ids(trans, job.to_dict("element", system_details=is_admin), True)
        full_output = util.asbool(kwd.get("full", "false"))
        if full_output:
            job_dict.update(dict(stderr=job.stderr, stdout=job.stdout))
            if is_admin:
                job_dict["user_email"] = job.user.email

                def metric_to_dict(metric):
                    metric_name = metric.metric_name
                    metric_value = metric.metric_value
                    metric_plugin = metric.plugin
                    title, value = trans.app.job_metrics.format(metric_plugin, metric_name, metric_value)
                    return dict(
                        title=title, value=value, plugin=metric_plugin, name=metric_name, raw_value=str(metric_value)
                    )

                job_dict["job_metrics"] = [metric_to_dict(metric) for metric in job.metrics]
        return job_dict
Example 15
    def search( self, trans, search_term, **kwd ):
        """ 
        Perform a search over the Whoosh index. 
        The index has to be pre-created with build_ts_whoosh_index.sh.
        TS config option toolshed_search_on has to be turned on and
        toolshed_whoosh_index_dir has to be specified and existing.

        :param search_term:
        :param page:
        :param jsonp:
        :param callback:

        :returns dict:
        """
        if not self.app.config.toolshed_search_on:
            raise exceptions.ConfigDoesNotAllowException( 'Searching the TS through the API is turned off for this instance.' )
        if not self.app.config.toolshed_whoosh_index_dir:
            raise exceptions.ConfigDoesNotAllowException( 'There is no directory for the search index specified. Please contact the administrator.' )
        search_term = search_term.strip()
        if len( search_term ) < 3:
            raise exceptions.RequestParameterInvalidException( 'The search term has to be at least 3 characters long.' )

        page = kwd.get( 'page', 1 )
        return_jsonp = util.asbool( kwd.get( 'jsonp', False ) )
        callback = kwd.get( 'callback', 'callback' )

        repo_search = RepoSearch()
        results = repo_search.search( trans, search_term, page )
        results[ 'hostname' ] = url_for( '/', qualified = True )

        if return_jsonp:
            response = '%s(%s);' % ( callback, json.dumps( results ) )
        else:
            response = json.dumps( results )
        return response
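The two response shapes produced by the branch above can be illustrated as follows (payload contents invented):

# return_jsonp falsy  -> json.dumps(results), e.g. '{"hostname": "https://toolshed.example.org/", ...}'
# return_jsonp truthy -> 'callback(...);' wrapping the same JSON, suitable for a JSONP consumer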
Example 16
 def upgrade_available( self ):
     if self.tool_shed_status:
         if self.is_deprecated_in_tool_shed:
             # Only allow revision upgrades if the repository is not deprecated in the tool shed.
             return False
         return asbool( self.tool_shed_status.get( 'revision_upgrade', False ) )
     return False
 def get_prior_install_required_dict(self, tool_shed_repositories, repository_dependencies_dict):
     """
     Return a dictionary whose keys are the received tsr_ids and whose values are a list of tsr_ids, each of which is contained in the received
     list of tsr_ids and whose associated repository must be installed prior to the repository associated with the tsr_id key.
     """
     # Initialize the dictionary.
     prior_install_required_dict = {}
     tsr_ids = [tool_shed_repository.id for tool_shed_repository in tool_shed_repositories]
     for tsr_id in tsr_ids:
         prior_install_required_dict[tsr_id] = []
     # Inspect the repository dependencies about to be installed and populate the dictionary.
     for rd_key, rd_tups in repository_dependencies_dict.items():
         if rd_key in ["root_key", "description"]:
             continue
         for rd_tup in rd_tups:
             prior_install_ids = []
             tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = common_util.parse_repository_dependency_tuple(
                 rd_tup
             )
             if util.asbool(prior_installation_required):
                 for tsr in tool_shed_repositories:
                     if tsr.name == name and tsr.owner == owner and tsr.changeset_revision == changeset_revision:
                         prior_install_ids.append(tsr.id)
                     prior_install_required_dict[tsr.id] = prior_install_ids
     return prior_install_required_dict
Example 18
    def index( self, trans, deleted=False, owner=None, name=None, **kwd ):
        """
        GET /api/repositories

        :param deleted: True/False, displays repositories that are or are not set to deleted.
        :param owner: the owner's public username.
        :param name: the repository name.

        Displays a collection (list) of repositories.
        """
        # Example URL: http://localhost:9009/api/repositories
        repository_dicts = []
        deleted = util.asbool( deleted )
        clause_list = [ and_( trans.app.model.Repository.table.c.deprecated == False,
                              trans.app.model.Repository.table.c.deleted == deleted ) ]
        if owner is not None:
            clause_list.append( and_( trans.app.model.User.table.c.username == owner, 
                                      trans.app.model.Repository.table.c.user_id == trans.app.model.User.table.c.id ) )
        if name is not None:
            clause_list.append( trans.app.model.Repository.table.c.name == name )
        for repository in trans.sa_session.query( trans.app.model.Repository ) \
                                          .filter( *clause_list ) \
                                          .order_by( trans.app.model.Repository.table.c.name ):
            repository_dict = repository.to_dict( view='collection',
                                                  value_mapper=self.__get_value_mapper( trans ) )
            repository_dict[ 'url' ] = web.url_for( controller='repositories',
                                                    action='show',
                                                    id=trans.security.encode_id( repository.id ) )
            repository_dicts.append( repository_dict )
        return repository_dicts
Example 19
 def __basic_authentication( self, environ, username, password ):
     """The environ parameter is needed in basic authentication.  We also check it if use_remote_user is true."""
     if asbool( self.config.get( 'use_remote_user', False ) ):
         assert "HTTP_REMOTE_USER" in environ, "use_remote_user is set but no HTTP_REMOTE_USER variable"
         return self.__authenticate_remote_user( environ, username, password )
     else:
         return self.__authenticate( username, password )
Example 20
    def repository_ids_for_setting_metadata( self, trans, my_writable=False, **kwd ):
        """
        GET /api/repository_ids_for_setting_metadata

        Displays a collection (list) of repository ids ordered for setting metadata.

        :param key: the API key of the Tool Shed user.
        :param my_writable (optional): if the API key is associated with an admin user in the Tool Shed, setting this param value
                                       to True will restrict resetting metadata to only repositories that are writable by the user
                                       in addition to those repositories of type tool_dependency_definition.  This param is ignored
                                       if the current user is not an admin user, in which case this same restriction is automatic.
        """
        if trans.user_is_admin():
            my_writable = util.asbool( my_writable )
        else:
            my_writable = True
        handled_repository_ids = []
        repository_ids = []
        rmm = repository_metadata_manager.RepositoryMetadataManager( trans.app, trans.user )
        query = rmm.get_query_for_setting_metadata_on_repositories( my_writable=my_writable, order=False )
        # Make sure repositories of type tool_dependency_definition are first in the list.
        for repository in query:
            if repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION and repository.id not in handled_repository_ids:
                repository_ids.append( trans.security.encode_id( repository.id ) )
        # Now add all remaining repositories to the list.
        for repository in query:
            if repository.type != rt_util.TOOL_DEPENDENCY_DEFINITION and repository.id not in handled_repository_ids:
                repository_ids.append( trans.security.encode_id( repository.id ) )
        return repository_ids
Example 21
 def __init__(self, **kwargs):
     super(FilePatternDatasetCollectionDescription, self).__init__(**kwargs)
     pattern = kwargs.get("pattern", "__default__")
     self.recurse = asbool(kwargs.get("recurse", False))
     if pattern in NAMED_PATTERNS:
         pattern = NAMED_PATTERNS.get(pattern)
     self.pattern = pattern
     sort_by = kwargs.get("sort_by", DEFAULT_SORT_BY)
     if sort_by.startswith("reverse_"):
         self.sort_reverse = True
         sort_by = sort_by[len("reverse_"):]
     else:
         self.sort_reverse = False
     if "_" in sort_by:
         sort_comp, sort_by = sort_by.split("_", 1)
         assert sort_comp in ["lexical", "numeric"]
     else:
         sort_comp = DEFAULT_SORT_COMP
     assert sort_by in [
         "filename",
         "name",
         "designation",
         "dbkey"
     ]
     self.sort_key = sort_by
     self.sort_comp = sort_comp
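A few concrete decompositions of the sort_by string handled by the constructor above, derived directly from its parsing logic:

# "reverse_numeric_designation" -> sort_reverse=True,  sort_comp="numeric",         sort_key="designation"
# "lexical_name"                -> sort_reverse=False, sort_comp="lexical",         sort_key="name"
# "filename"                    -> sort_reverse=False, sort_comp=DEFAULT_SORT_COMP, sort_key="filename"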
Example 22
def app_factory( global_conf, **kwargs ):
    """Return a wsgi application serving the root object"""
    # Create the Galaxy application unless passed in
    kwargs = load_app_properties(
        kwds=kwargs
    )
    if 'app' in kwargs:
        app = kwargs.pop( 'app' )
    else:
        from galaxy.webapps.reports.app import UniverseApplication
        app = UniverseApplication( global_conf=global_conf, **kwargs )
    atexit.register( app.shutdown )
    # Create the universe WSGI application
    webapp = ReportsWebApplication( app, session_cookie='galaxyreportssession', name="reports" )
    add_ui_controllers( webapp, app )
    # These two routes handle our simple needs at the moment
    webapp.add_route( '/{controller}/{action}', controller="root", action='index' )
    webapp.add_route( '/{action}', controller='root', action='index' )
    webapp.finalize_config()
    # Wrap the webapp in some useful middleware
    if kwargs.get( 'middleware', True ):
        webapp = wrap_in_middleware( webapp, global_conf, **kwargs )
    if asbool( kwargs.get( 'static_enabled', True ) ):
        webapp = wrap_in_static( webapp, global_conf, **kwargs )
    # Close any pooled database connections before forking
    try:
        galaxy.model.mapping.metadata.bind.dispose()
    except:
        log.exception("Unable to dispose of pooled galaxy model database connections.")
    # Return
    return webapp
def main(args):
    '''
    Amazon credentials can be provided in one of three ways:
    1. By specifying them on the command line with the --id and --secret arguments.
    2. By specifying a path to a file that contains the credentials in the form ACCESS_KEY:SECRET_KEY
       using the --s3passwd argument.
    3. By specifying the above path in the 's3passwd' environment variable.
    Each listed option will override the ones below it, if present.
    '''
    if None in [args.id, args.secret]:
        if args.s3passwd is None:
            args.s3passwd = os.environ.get('s3passwd', None)
        if args.s3passwd is not None and os.path.exists(args.s3passwd):
            awsid, secret = open(args.s3passwd, 'r').read().rstrip('\n').split(':')
        else:
            print('Amazon ID and secret not provided, and no s3passwd file found.')
            return 1
    else:
        awsid = args.id
        secret = args.secret
    dependency_cleaner = BucketList(awsid, secret, args.bucket)
    if len(dependency_cleaner.empty_installation_paths) == 0:
        print('No empty installation paths found, exiting.')
        return 0
    print('The following %d tool dependency installation paths were found to be empty or contain only the file %s.' %
        (len(dependency_cleaner.empty_installation_paths), INSTALLATION_LOG))
    if asbool(args.delete):
        dependency_cleaner.delete_empty_installation_paths()
    else:
        for empty_installation_path in dependency_cleaner.empty_installation_paths:
            print(empty_installation_path)
    return 0
Example 24
    def index( self, trans, **kwd ):
        """
        index( self, trans, **kwd )
        * GET /api/libraries:
            Returns a list of summary data for all libraries.

        :param  deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted``
        :type   deleted: boolean (optional)

        :returns:   list of dictionaries containing library information
        :rtype:     list

        .. seealso:: :attr:`galaxy.model.Library.dict_collection_visible_keys`

        """
        is_admin = trans.user_is_admin()
        query = trans.sa_session.query( trans.app.model.Library )
        deleted = kwd.get( 'deleted', 'missing' )
        try:
            if not is_admin:
                # non-admins can't see deleted libraries
                deleted = False
            else:
                deleted = util.asbool( deleted )
            if deleted:
                query = query.filter( trans.app.model.Library.table.c.deleted == True )
            else:
                query = query.filter( trans.app.model.Library.table.c.deleted == False )
        except ValueError:
            # given value wasn't true/false but the user is admin so we don't filter on this parameter at all
            pass

        if not is_admin:
            # non-admins can see only allowed and public libraries
            current_user_role_ids = [ role.id for role in trans.get_current_user_roles() ]
            library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
            restricted_library_ids = [ lp.library_id for lp in ( trans.sa_session.query( trans.model.LibraryPermissions )
                                                                 .filter( trans.model.LibraryPermissions.table.c.action == library_access_action )
                                                                 .distinct() ) ]
            accessible_restricted_library_ids = [ lp.library_id for lp in ( trans.sa_session.query( trans.model.LibraryPermissions )
                                                  .filter( and_( trans.model.LibraryPermissions.table.c.action == library_access_action,
                                                                 trans.model.LibraryPermissions.table.c.role_id.in_( current_user_role_ids ) ) ) ) ]
            query = query.filter( or_( not_( trans.model.Library.table.c.id.in_( restricted_library_ids ) ), trans.model.Library.table.c.id.in_( accessible_restricted_library_ids ) ) )
        libraries = []
        for library in query:
            item = library.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'root_folder_id': trans.security.encode_id } )
            if trans.app.security_agent.library_is_public( library, contents=False ):
                item[ 'public' ] = True
            current_user_roles = trans.get_current_user_roles()
            if not trans.user_is_admin():
                item['can_user_add'] = trans.app.security_agent.can_add_library_item( current_user_roles, library )
                item['can_user_modify'] = trans.app.security_agent.can_modify_library_item( current_user_roles, library )
                item['can_user_manage'] = trans.app.security_agent.can_manage_library_item( current_user_roles, library )
            else:
                item['can_user_add'] = True
                item['can_user_modify'] = True
                item['can_user_manage'] = True
            libraries.append( item )
        return libraries
Example 25
 def revision_update_available( self ):
     # This method should be named update_available, but since it is no longer possible to drop a table column using migration scripts
     # with the sqlite database (see ~/galaxy/model/migrate/versions/0016_drop_update_available_col_add_tool_shed_status_col.py), we
     # have to name it in such a way that it will not conflict with the eliminated tool_shed_repository.update_available column (which
     # cannot be eliminated if using the sqlite database).
     if self.tool_shed_status:
         return asbool( self.tool_shed_status.get( 'revision_update', False ) )
     return False
Example 26
    def install( self, trans, **kwd ):
        """
        POST /api/tool_shed_repositories/install
        Initiate the installation of a repository.

        :param install_resolver_dependencies: True to install resolvable dependencies.
        :param install_tool_dependencies: True to install tool dependencies.
        :param install_repository_dependencies: True to install repository dependencies.
        :param tool_panel_section_id: The unique identifier for an existing tool panel section
        :param new_tool_panel_section_label: Create a new tool panel section with this label
        :param shed_tool_conf: The shed tool config file to use for this installation
        :param tool_shed_url: The URL for the toolshed whence this repository is being installed
        :param changeset: The changeset to update to after cloning the repository
        """
        params = dict()
        irm = InstallRepositoryManager( self.app )
        changeset = kwd.get( 'changeset', None )
        tool_shed_url = kwd.get( 'tool_shed_url', None )
        params[ 'name' ] = kwd.get( 'name', None )
        params[ 'owner' ] = kwd.get( 'owner', None )
        params[ 'tool_panel_section_id' ] = kwd.get( 'tool_panel_section_id', None )
        params[ 'new_tool_panel_section_label' ] = kwd.get( 'new_tool_panel_section', None )
        params[ 'tool_panel_section_mapping' ] = json.loads( kwd.get( 'tool_panel_section', '{}' ) )
        params[ 'install_resolver_dependencies' ] = util.asbool( kwd.get( 'install_resolver_dependencies', False ) )
        params[ 'install_tool_dependencies' ] = util.asbool( kwd.get( 'install_tool_dependencies', False ) )
        params[ 'install_repository_dependencies' ] = util.asbool( kwd.get( 'install_repository_dependencies', False ) )
        params[ 'shed_tool_conf' ] = kwd.get( 'shed_tool_conf', None )
        params[ 'tool_path' ] = suc.get_tool_path_by_shed_tool_conf_filename( self.app, params[ 'shed_tool_conf' ] )
        try:
            tool_shed_repositories = irm.install(
                tool_shed_url,
                params[ 'name' ],
                params[ 'owner' ],
                changeset,
                params
            )
            if tool_shed_repositories is None:
                message = "No repositories were installed, possibly because the selected repository has already been installed."
                return dict( status="ok", message=message )
            tsr_ids_for_monitoring = [ trans.security.encode_id( tsr.id ) for tsr in tool_shed_repositories ]
            url = web.url_for( controller='admin_toolshed', action='monitor_repository_installation', tool_shed_repository_ids=','.join( tsr_ids_for_monitoring ) )
            return url
        except RepositoriesInstalledException as e:
            log.exception( e )
            return dict( message=e.message,
                         status='error' )
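A hypothetical request payload for this endpoint, using only the parameter names read above (all values invented):

payload = {
    'tool_shed_url': 'https://toolshed.example.org',
    'name': 'fastqc',
    'owner': 'devteam',
    'changeset': 'abc123def456',
    'tool_panel_section_id': 'ngs_qc',
    'install_resolver_dependencies': 'false',
    'install_tool_dependencies': 'true',
    'install_repository_dependencies': 'true',
    'shed_tool_conf': 'shed_tool_conf.xml',
}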
Example 27
def dataset_collection_description(**kwargs):
    if asbool(kwargs.get("from_provided_metadata", False)):
        for key in ["pattern", "sort_by"]:
            if kwargs.get(key):
                raise Exception("Cannot specify attribute [%s] if from_provided_metadata is True" % key)
        return ToolProvidedMetadataDatasetCollection(**kwargs)
    else:
        return FilePatternDatasetCollectionDescription(**kwargs)
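A hedged usage sketch of the factory above; argument values are illustrative only:

# Pattern-driven discovery: builds a FilePatternDatasetCollectionDescription.
desc = dataset_collection_description(pattern='__name_and_ext__', format='tabular')

# Metadata-driven discovery: builds a ToolProvidedMetadataDatasetCollection.
# Combining from_provided_metadata with pattern or sort_by raises an Exception.
desc = dataset_collection_description(from_provided_metadata='true')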
Example 28
 def __init__(self, **kwargs):
     self.verbose = asbool(kwargs.get("verbose", False))
     params_str = kwargs.get("params", None)
     if params_str:
         params = [v.strip() for v in params_str.split(",")]
     else:
         params = TITLES.keys()
     self.params = params
Example 29
    def __init__( self, **kwargs ):
        self.__configure_paths( kwargs )
        self.__configure_subsystems( kwargs )
        saved_logs_path = kwargs.get( "saved_logs_path", None )
        if "app" in kwargs:
            saved_logs_path = kwargs[ "app" ].config.resolve_path( saved_logs_path )
        self.saved_logs_path = saved_logs_path
        self.__configure_collectl_recorder_args( kwargs )
        self.summarize_process_data = util.asbool( kwargs.get( "summarize_process_data", True ) )
        self.log_collectl_program_output = util.asbool( kwargs.get( "log_collectl_program_output", False ) )
        if self.summarize_process_data:
            if subsystems.get_subsystem( "process" ) not in self.subsystems:
                raise Exception( "Collectl plugin misconfigured - cannot summarize_process_data without process subsystem being enabled." )

            process_statistics = kwargs.get( "process_statistics", None )
            # None will let processes module use default set of statistics
            # defined there.
            self.process_statistics = processes.parse_process_statistics( process_statistics )
Example 30
def default_web_host_for_selenium_tests():
    if asbool(GALAXY_TEST_SELENIUM_REMOTE):
        try:
            dev_ip = get_ip_address('docker0')
            return dev_ip
        except IOError:
            return DEFAULT_WEB_HOST
    else:
        return DEFAULT_WEB_HOST
Example 31
 def __init__(self, **kwargs):
     self.verbose = asbool(kwargs.get("verbose", False))
Example 32
def app_factory(global_conf, **kwargs):
    """
    Return a wsgi application serving the root object
    """
    kwargs = load_app_properties(kwds=kwargs)
    # Create the Galaxy application unless passed in
    if 'app' in kwargs:
        app = kwargs.pop('app')
        galaxy.app.app = app
    else:
        try:
            app = galaxy.app.UniverseApplication(global_conf=global_conf,
                                                 **kwargs)
            galaxy.app.app = app
        except:
            import traceback
            traceback.print_exc()
            sys.exit(1)
    # Call app's shutdown method when the interpeter exits, this cleanly stops
    # the various Galaxy application daemon threads
    atexit.register(app.shutdown)
    # Create the universe WSGI application
    webapp = GalaxyWebApplication(app,
                                  session_cookie='galaxysession',
                                  name='galaxy')
    webapp.add_ui_controllers('galaxy.webapps.galaxy.controllers', app)
    # Force /history to go to /root/history -- needed since the tests assume this
    webapp.add_route('/history', controller='root', action='history')
    # Force /activate to go to the controller
    webapp.add_route('/activate', controller='user', action='activate')
    # These two routes handle our simple needs at the moment
    webapp.add_route('/async/:tool_id/:data_id/:data_secret',
                     controller='async',
                     action='index',
                     tool_id=None,
                     data_id=None,
                     data_secret=None)
    webapp.add_route('/:controller/:action', action='index')
    webapp.add_route('/:action', controller='root', action='index')

    # allow for subdirectories in extra_files_path
    webapp.add_route('/datasets/:dataset_id/display/{filename:.+?}',
                     controller='dataset',
                     action='display',
                     dataset_id=None,
                     filename=None)
    webapp.add_route('/datasets/:dataset_id/:action/:filename',
                     controller='dataset',
                     action='index',
                     dataset_id=None,
                     filename=None)
    webapp.add_route(
        '/display_application/:dataset_id/:app_name/:link_name/:user_id/:app_action/:action_param',
        controller='dataset',
        action='display_application',
        dataset_id=None,
        user_id=None,
        app_name=None,
        link_name=None,
        app_action=None,
        action_param=None)
    webapp.add_route('/u/:username/d/:slug/:filename',
                     controller='dataset',
                     action='display_by_username_and_slug',
                     filename=None)
    webapp.add_route('/u/:username/p/:slug',
                     controller='page',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/:username/h/:slug',
                     controller='history',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/:username/w/:slug',
                     controller='workflow',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/:username/w/:slug/:format',
                     controller='workflow',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/:username/v/:slug',
                     controller='visualization',
                     action='display_by_username_and_slug')
    webapp.add_route('/search', controller='search', action='index')

    # TODO: Refactor above routes into external method to allow testing in
    # isolation as well.
    populate_api_routes(webapp, app)

    # ==== Done
    # Indicate that all configuration settings have been provided
    webapp.finalize_config()

    # Wrap the webapp in some useful middleware
    if kwargs.get('middleware', True):
        webapp = wrap_in_middleware(webapp, global_conf, **kwargs)
    if asbool(kwargs.get('static_enabled', True)):
        if app.config.is_uwsgi:
            log.error(
                "Static middleware is enabled in your configuration but this is a uwsgi process.  Refusing to wrap in static middleware."
            )
        else:
            webapp = wrap_in_static(
                webapp,
                global_conf,
                plugin_frameworks=[app.visualizations_registry],
                **kwargs)
    # Close any pooled database connections before forking
    try:
        galaxy.model.mapping.metadata.bind.dispose()
    except:
        log.exception(
            "Unable to dispose of pooled galaxy model database connections.")
    try:
        # This model may not actually be bound.
        if galaxy.model.tool_shed_install.mapping.metadata.bind:
            galaxy.model.tool_shed_install.mapping.metadata.bind.dispose()
    except:
        log.exception(
            "Unable to dispose of pooled toolshed install model database connections."
        )

    if not app.config.is_uwsgi:
        postfork_setup()

    # Return
    return webapp
import os
from logging import getLogger

from requests import get, post, delete, patch
from six import StringIO

from galaxy import util
from galaxy.tools.parser.interface import TestCollectionDef
from galaxy.util.bunch import Bunch
from galaxy.util.odict import odict

log = getLogger( __name__ )

# Off by default because it can pound the database pretty heavily
# and result in sqlite errors on larger tests or larger numbers of
# tests.
VERBOSE_ERRORS = util.asbool( os.environ.get( "GALAXY_TEST_VERBOSE_ERRORS", False ) )
UPLOAD_ASYNC = util.asbool( os.environ.get( "GALAXY_TEST_UPLOAD_ASYNC", True ) )
ERROR_MESSAGE_DATASET_SEP = "--------------------------------------"


def build_interactor( test_case, type="api" ):
    interactor_class = GALAXY_INTERACTORS[ type ]
    return interactor_class( test_case )


def stage_data_in_history( galaxy_interactor, all_test_data, history, shed_tool_id=None ):
    # Upload any needed files
    upload_waits = []

    if UPLOAD_ASYNC:
        for test_data in all_test_data:
def get_driver():
    if asbool(GALAXY_TEST_SELENIUM_REMOTE):
        return get_remote_driver()
    else:
        return get_local_driver()
Example 35
 def check_fluent_logger(self):
     return asbool(self.config["fluent_log"])
Example 36
    def queue_job(self, job_wrapper):
        # prepare the job
        include_metadata = asbool(
            job_wrapper.job_destination.params.get(
                "embed_metadata_in_job", DEFAULT_EMBED_METADATA_IN_JOB))
        if not self.prepare_job(job_wrapper,
                                include_metadata=include_metadata):
            return

        stderr = stdout = ''
        exit_code = 0

        # command line has been added to the wrapper by prepare_job()
        command_line, exit_code_path = self.__command_line(job_wrapper)
        job_id = job_wrapper.get_id_tag()

        try:
            stdout_file = tempfile.NamedTemporaryFile(
                suffix='_stdout', dir=job_wrapper.working_directory)
            stderr_file = tempfile.NamedTemporaryFile(
                suffix='_stderr', dir=job_wrapper.working_directory)
            log.debug('(%s) executing job script: %s' % (job_id, command_line))
            proc = subprocess.Popen(args=command_line,
                                    shell=True,
                                    cwd=job_wrapper.working_directory,
                                    stdout=stdout_file,
                                    stderr=stderr_file,
                                    env=self._environ,
                                    preexec_fn=os.setpgrp)
            job_wrapper.set_job_destination(job_wrapper.job_destination,
                                            proc.pid)
            job_wrapper.change_state(model.Job.states.RUNNING)

            terminated = self.__poll_if_needed(proc, job_wrapper, job_id)
            if terminated:
                return

            # Reap the process and get the exit code.
            exit_code = proc.wait()
            try:
                exit_code = int(open(exit_code_path, 'r').read())
            except Exception:
                log.warn("Failed to read exit code from path %s" %
                         exit_code_path)
                pass
            stdout_file.seek(0)
            stderr_file.seek(0)
            stdout = shrink_stream_by_size(stdout_file,
                                           DATABASE_MAX_STRING_SIZE,
                                           join_by="\n..\n",
                                           left_larger=True,
                                           beginning_on_size_error=True)
            stderr = shrink_stream_by_size(stderr_file,
                                           DATABASE_MAX_STRING_SIZE,
                                           join_by="\n..\n",
                                           left_larger=True,
                                           beginning_on_size_error=True)
            stdout_file.close()
            stderr_file.close()
            log.debug('execution finished: %s' % command_line)
        except Exception:
            log.exception("failure running job %d" % job_wrapper.job_id)
            job_wrapper.fail("failure running job", exception=True)
            return
        external_metadata = not asbool(
            job_wrapper.job_destination.params.get(
                "embed_metadata_in_job", DEFAULT_EMBED_METADATA_IN_JOB))
        if external_metadata:
            self._handle_metadata_externally(job_wrapper,
                                             resolve_requirements=True)
        # Finish the job!
        try:
            job_wrapper.finish(stdout, stderr, exit_code)
        except Exception:
            log.exception("Job wrapper finish method failed")
            job_wrapper.fail("Unable to finish job", exception=True)
Example No. 37
 def set_filter_dependencies_needed_for_compiling(self, value):
     self.filter_dependencies_needed_for_compiling = asbool(value)
Example No. 38
 def get_singularity_target_kwds(self):
     return dict(
         singularity_cmd=self.prop("cmd", singularity_util.DEFAULT_SINGULARITY_COMMAND),
         sudo=asbool(self.prop("sudo", singularity_util.DEFAULT_SUDO)),
         sudo_cmd=self.prop("sudo_cmd", singularity_util.DEFAULT_SUDO_COMMAND),
     )
Example No. 39
 def __container_type_enabled(self, container_type, destination_info):
     return asbool(
         destination_info.get("%s_enabled" % container_type, False))
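
To illustrate the check above, a destination's parameters only need a "<type>_enabled" key set to a truthy string; the dictionary below is a hypothetical example, and the import assumes a Galaxy environment is on the path:

from galaxy.util import asbool  # assumes a Galaxy checkout/installation is importable

# Hypothetical destination parameters; "docker_enabled" is the kind of key
# inspected above via "%s_enabled" % container_type.
destination_info = {"docker_enabled": "true", "docker_volumes": "$defaults"}


def container_type_enabled(container_type, destination_info):
    # Same pattern as __container_type_enabled above.
    return asbool(destination_info.get("%s_enabled" % container_type, False))


print(container_type_enabled("docker", destination_info))       # True
print(container_type_enabled("singularity", destination_info))  # False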
Example No. 40
def main():
    # ---- Configuration ------------------------------------------------------
    tool_shed_test_host = os.environ.get('TOOL_SHED_TEST_HOST',
                                         default_tool_shed_test_host)
    tool_shed_test_port = os.environ.get('TOOL_SHED_TEST_PORT', None)
    galaxy_test_host = os.environ.get('GALAXY_TEST_HOST',
                                      default_galaxy_test_host)
    galaxy_test_port = os.environ.get('GALAXY_TEST_PORT', None)
    tool_path = os.environ.get('TOOL_SHED_TEST_TOOL_PATH', 'tools')
    if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
        os.environ['HTTP_ACCEPT_LANGUAGE'] = default_tool_shed_locales
    tool_shed_test_file_dir = os.environ.get('TOOL_SHED_TEST_FILE_DIR',
                                             default_tool_shed_test_file_dir)
    if not os.path.isabs(tool_shed_test_file_dir):
        tool_shed_test_file_dir = os.path.abspath(tool_shed_test_file_dir)
    ignore_files = ()
    tool_dependency_dir = os.environ.get('TOOL_SHED_TOOL_DEPENDENCY_DIR', None)
    use_distributed_object_store = os.environ.get(
        'TOOL_SHED_USE_DISTRIBUTED_OBJECT_STORE', False)
    if not os.path.isdir(tool_shed_test_tmp_dir):
        os.mkdir(tool_shed_test_tmp_dir)
    tool_shed_test_proxy_port = None
    galaxy_test_proxy_port = None
    if 'TOOL_SHED_TEST_DBPATH' in os.environ:
        shed_db_path = os.environ['TOOL_SHED_TEST_DBPATH']
    else:
        tempdir = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
        shed_db_path = os.path.join(tempdir, 'database')
    shed_tool_data_table_conf_file = os.environ.get(
        'TOOL_SHED_TEST_TOOL_DATA_TABLE_CONF',
        os.path.join(tool_shed_test_tmp_dir, 'shed_tool_data_table_conf.xml'))
    galaxy_shed_data_manager_conf_file = os.environ.get(
        'GALAXY_SHED_DATA_MANAGER_CONF',
        os.path.join(tool_shed_test_tmp_dir,
                     'test_shed_data_manager_conf.xml'))
    galaxy_tool_data_table_conf_file = os.environ.get(
        'GALAXY_TEST_TOOL_DATA_TABLE_CONF',
        os.path.join(tool_shed_test_tmp_dir, 'tool_data_table_conf.xml'))
    galaxy_tool_conf_file = os.environ.get(
        'GALAXY_TEST_TOOL_CONF',
        os.path.join(tool_shed_test_tmp_dir, 'test_tool_conf.xml'))
    galaxy_shed_tool_conf_file = os.environ.get(
        'GALAXY_TEST_SHED_TOOL_CONF',
        os.path.join(tool_shed_test_tmp_dir, 'test_shed_tool_conf.xml'))
    galaxy_migrated_tool_conf_file = os.environ.get(
        'GALAXY_TEST_MIGRATED_TOOL_CONF',
        os.path.join(tool_shed_test_tmp_dir, 'test_migrated_tool_conf.xml'))
    galaxy_tool_sheds_conf_file = os.environ.get(
        'GALAXY_TEST_TOOL_SHEDS_CONF',
        os.path.join(tool_shed_test_tmp_dir, 'test_sheds_conf.xml'))
    if 'GALAXY_TEST_TOOL_DATA_PATH' in os.environ:
        tool_data_path = os.environ.get('GALAXY_TEST_TOOL_DATA_PATH')
    else:
        tool_data_path = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
        os.environ['GALAXY_TEST_TOOL_DATA_PATH'] = tool_data_path
    if 'GALAXY_TEST_DBPATH' in os.environ:
        galaxy_db_path = os.environ['GALAXY_TEST_DBPATH']
    else:
        tempdir = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
        galaxy_db_path = os.path.join(tempdir, 'database')
    shed_file_path = os.path.join(shed_db_path, 'files')
    galaxy_file_path = os.path.join(galaxy_db_path, 'files')
    hgweb_config_file_path = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
    new_repos_path = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
    galaxy_tempfiles = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
    galaxy_shed_tool_path = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
    galaxy_migrated_tool_path = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
    galaxy_tool_dependency_dir = tempfile.mkdtemp(dir=tool_shed_test_tmp_dir)
    os.environ['GALAXY_TEST_TOOL_DEPENDENCY_DIR'] = galaxy_tool_dependency_dir
    hgweb_config_dir = hgweb_config_file_path
    os.environ['TEST_HG_WEB_CONFIG_DIR'] = hgweb_config_dir
    print "Directory location for hgweb.config:", hgweb_config_dir
    if 'TOOL_SHED_TEST_DBURI' in os.environ:
        toolshed_database_connection = os.environ['TOOL_SHED_TEST_DBURI']
    else:
        toolshed_database_connection = 'sqlite:///' + os.path.join(
            shed_db_path, 'community_test.sqlite')
    galaxy_database_auto_migrate = False
    if 'GALAXY_TEST_DBURI' in os.environ:
        galaxy_database_connection = os.environ['GALAXY_TEST_DBURI']
    else:
        db_path = os.path.join(galaxy_db_path, 'universe.sqlite')
        if 'GALAXY_TEST_DB_TEMPLATE' in os.environ:
            # Middle ground between recreating a completely new
            # database and pointing at existing database with
            # GALAXY_TEST_DBURI. The former requires a lot of setup
            # time, the latter results in test failures in certain
            # cases (namely tool shed tests expecting clean database).
            __copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'],
                                     db_path)
            galaxy_database_auto_migrate = True
        if not os.path.exists(galaxy_db_path):
            os.makedirs(galaxy_db_path)
        galaxy_database_connection = 'sqlite:///%s' % db_path
    if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
        install_galaxy_database_connection = os.environ[
            'GALAXY_TEST_INSTALL_DBURI']
    elif asbool(
            os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED',
                           default_install_db_merged)):
        install_galaxy_database_connection = galaxy_database_connection
    else:
        install_galaxy_db_path = os.path.join(galaxy_db_path, 'install.sqlite')
        install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path
    tool_shed_global_conf = get_webapp_global_conf()
    tool_shed_global_conf['__file__'] = 'tool_shed_wsgi.ini.sample'
    kwargs = dict(
        admin_users='*****@*****.**',
        allow_user_creation=True,
        allow_user_deletion=True,
        database_connection=toolshed_database_connection,
        datatype_converters_config_file='datatype_converters_conf.xml.sample',
        file_path=shed_file_path,
        global_conf=tool_shed_global_conf,
        hgweb_config_dir=hgweb_config_dir,
        job_queue_workers=5,
        id_secret='changethisinproductiontoo',
        log_destination="stdout",
        new_file_path=new_repos_path,
        running_functional_tests=True,
        shed_tool_data_table_config=shed_tool_data_table_conf_file,
        smtp_server='smtp.dummy.string.tld',
        email_from='functional@localhost',
        template_path='templates',
        tool_path=tool_path,
        tool_parse_help=False,
        tool_data_table_config_path=galaxy_tool_data_table_conf_file,
        use_heartbeat=False)
    for dir in [tool_shed_test_tmp_dir]:
        try:
            os.makedirs(dir)
        except OSError:
            pass

    print "Tool shed database connection:", toolshed_database_connection
    print "Galaxy database connection:", galaxy_database_connection

    # Generate the tool_data_table_conf.xml file.
    file(galaxy_tool_data_table_conf_file,
         'w').write(tool_data_table_conf_xml_template)
    # Generate the shed_tool_data_table_conf.xml file.
    file(shed_tool_data_table_conf_file,
         'w').write(tool_data_table_conf_xml_template)
    os.environ[
        'TOOL_SHED_TEST_TOOL_DATA_TABLE_CONF'] = shed_tool_data_table_conf_file
    # ---- Build Tool Shed Application --------------------------------------------------
    toolshedapp = None
    #    if not toolshed_database_connection.startswith( 'sqlite://' ):
    #        kwargs[ 'database_engine_option_max_overflow' ] = '20'
    if tool_dependency_dir is not None:
        kwargs['tool_dependency_dir'] = tool_dependency_dir
    if use_distributed_object_store:
        kwargs['object_store'] = 'distributed'
        kwargs[
            'distributed_object_store_config_file'] = 'distributed_object_store_conf.xml.sample'

    kwargs['global_conf'] = tool_shed_global_conf

    if not toolshed_database_connection.startswith('sqlite://'):
        kwargs['database_engine_option_pool_size'] = '10'

    toolshedapp = ToolshedUniverseApplication(**kwargs)
    database_contexts.tool_shed_context = toolshedapp.model.context
    log.info("Embedded Toolshed application started")

    # ---- Run tool shed webserver ------------------------------------------------------
    tool_shed_server = None
    tool_shed_global_conf['database_connection'] = toolshed_database_connection
    toolshedwebapp = toolshedbuildapp.app_factory(tool_shed_global_conf,
                                                  use_translogger=False,
                                                  static_enabled=True,
                                                  app=toolshedapp)
    if tool_shed_test_port is not None:
        tool_shed_server = httpserver.serve(toolshedwebapp,
                                            host=tool_shed_test_host,
                                            port=tool_shed_test_port,
                                            start_loop=False)
    else:
        random.seed()
        for i in range(0, 9):
            try:
                tool_shed_test_port = str(
                    random.randint(default_tool_shed_test_port_min,
                                   default_tool_shed_test_port_max))
                log.debug(
                    "Attempting to serve app on randomly chosen port: %s" %
                    tool_shed_test_port)
                tool_shed_server = httpserver.serve(toolshedwebapp,
                                                    host=tool_shed_test_host,
                                                    port=tool_shed_test_port,
                                                    start_loop=False)
                break
            except socket.error, e:
                if e[0] == 98:
                    continue
                raise
        else:
Example No. 41
    def containerize_command(self, command):
        env_directives = []
        for pass_through_var in self.tool_info.env_pass_through:
            env_directives.append('"{}=${}"'.format(pass_through_var,
                                                    pass_through_var))

        # Allow destinations to explicitly set environment variables just for
        # the docker container. A better approach would be to set them on the
        # destination and pass through only what the tool needs. (See todo in ToolInfo.)
        for key, value in self.destination_info.items():
            if key.startswith("docker_env_"):
                env = key[len("docker_env_"):]
                env_directives.append('"{}={}"'.format(env, value))

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception(
                "Cannot containerize command [%s] without defined working directory."
                % command)

        volumes_raw = self._expand_volume_str(
            self.destination_info.get("docker_volumes", "$defaults"))
        preprocessed_volumes_list = preprocess_volumes(volumes_raw,
                                                       self.container_type)
        # TODO: Remove redundant volumes...
        volumes = [DockerVolume.from_str(v) for v in preprocessed_volumes_list]
        # If a tool definitely has a temp directory available set it to /tmp in container for compat.
        # with CWL. This is part of that spec and should make it easier to share containers between CWL
        # and Galaxy.
        if self.job_info.tmp_directory is not None:
            volumes.append(
                DockerVolume.from_str("%s:/tmp:rw" %
                                      self.job_info.tmp_directory))
        else:
            volumes.append(
                DockerVolume.from_str(
                    "$_GALAXY_JOB_TMP_DIR:$_GALAXY_JOB_TMP_DIR:rw"))
        volumes_from = self.destination_info.get(
            "docker_volumes_from", docker_util.DEFAULT_VOLUMES_FROM)
        docker_host_props = self.docker_host_props

        cached_image_file = self.__get_cached_image_file()
        if not cached_image_file:
            # TODO: Add option to cache it once here and create cached_image_file.
            cache_command = docker_util.build_docker_cache_command(
                self.container_id, **docker_host_props)
        else:
            cache_command = self.__cache_from_file_command(
                cached_image_file, docker_host_props)
        run_command = docker_util.build_docker_run_command(
            command,
            self.container_id,
            volumes=volumes,
            volumes_from=volumes_from,
            env_directives=env_directives,
            working_directory=working_directory,
            net=self.prop(
                "net",
                None),  # By default, docker instance has networking disabled
            auto_rm=asbool(
                self.prop("auto_rm", docker_util.DEFAULT_AUTO_REMOVE)),
            set_user=self.prop("set_user", docker_util.DEFAULT_SET_USER),
            run_extra_arguments=self.prop(
                "run_extra_arguments",
                docker_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
            guest_ports=self.tool_info.guest_ports,
            container_name=self.container_name,
            **docker_host_props)
        kill_command = docker_util.build_docker_simple_command(
            "kill", container_name=self.container_name, **docker_host_props)
        # Suppress standard error below in the kill command because it can cause jobs that otherwise would work
        # to fail. Likely, in these cases the container has been stopped normally and so cannot be stopped again.
        # A less hacky approach might be to check if the container is running first before trying to kill.
        # https://stackoverflow.com/questions/34228864/stop-and-delete-docker-container-if-its-running
        # Standard error is:
        #    Error response from daemon: Cannot kill container: 2b0b961527574ebc873256b481bbe72e: No such container: 2b0b961527574ebc873256b481bbe72e
        return """
_on_exit() {{
  {} &> /dev/null
}}
trap _on_exit 0
{}\n{}""".format(kill_command, cache_command, run_command)
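
To make the trap-based cleanup concrete, here is the same template filled with placeholder commands; the docker invocations are simplified stand-ins, not output of docker_util:

kill_command = "docker kill example_container"                          # placeholder
cache_command = "docker inspect busybox"                                # placeholder
run_command = "docker run --name example_container busybox echo hello"  # placeholder

wrapper = """
_on_exit() {{
  {} &> /dev/null
}}
trap _on_exit 0
{}\n{}""".format(kill_command, cache_command, run_command)

# The trap fires when the job script exits, so the container is killed even on
# failure; its stderr is discarded for the reason described in the comment above.
print(wrapper)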
Example No. 42
    def queue_job(self, job_wrapper):
        """Create job script and submit it to the DRM"""

        # prepare the job
        include_metadata = asbool(
            job_wrapper.job_destination.params.get("embed_metadata_in_job",
                                                   True))
        if not self.prepare_job(job_wrapper,
                                include_metadata=include_metadata):
            return

        # get configured job destination
        job_destination = job_wrapper.job_destination

        # wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
        galaxy_id_tag = job_wrapper.get_id_tag()

        # get destination params
        query_params = submission_params(prefix="", **job_destination.params)
        galaxy_slots = query_params.get('request_cpus', None)
        if galaxy_slots:
            galaxy_slots_statement = 'GALAXY_SLOTS="%s"; export GALAXY_SLOTS_CONFIGURED="1"' % galaxy_slots
        else:
            galaxy_slots_statement = 'GALAXY_SLOTS="1"'

        # define job attributes
        cjs = CondorJobState(files_dir=self.app.config.cluster_files_directory,
                             job_wrapper=job_wrapper)

        cluster_directory = self.app.config.cluster_files_directory
        cjs.user_log = os.path.join(cluster_directory,
                                    'galaxy_%s.condor.log' % galaxy_id_tag)
        cjs.register_cleanup_file_attribute('user_log')
        submit_file = os.path.join(cluster_directory,
                                   'galaxy_%s.condor.desc' % galaxy_id_tag)
        executable = cjs.job_file

        build_submit_params = dict(
            executable=executable,
            output=cjs.output_file,
            error=cjs.error_file,
            user_log=cjs.user_log,
            query_params=query_params,
        )

        submit_file_contents = build_submit_description(**build_submit_params)
        script = self.get_job_file(
            job_wrapper,
            exit_code_path=cjs.exit_code_file,
            slots_statement=galaxy_slots_statement,
        )
        try:
            with open(executable, "w") as fh:
                fh.write(script)
            os.chmod(executable, 0o750)
        except Exception:
            job_wrapper.fail("failure preparing job script", exception=True)
            log.exception("(%s) failure preparing job script" % galaxy_id_tag)
            return

        try:
            with open(submit_file, "w") as fh:
                fh.write(submit_file_contents)
        except Exception:
            if self.app.config.cleanup_job == "always":
                cjs.cleanup()
                # job_wrapper.fail() calls job_wrapper.cleanup()
            job_wrapper.fail("failure preparing submit file", exception=True)
            log.exception("(%s) failure preparing submit file" % galaxy_id_tag)
            return

        # job was deleted while we were preparing it
        if job_wrapper.get_state() == model.Job.states.DELETED:
            log.debug("Job %s deleted by user before it entered the queue" %
                      galaxy_id_tag)
            if self.app.config.cleanup_job in ("always", "onsuccess"):
                os.unlink(submit_file)
                cjs.cleanup()
                job_wrapper.cleanup()
            return

        log.debug("(%s) submitting file %s" % (galaxy_id_tag, executable))

        external_job_id, message = condor_submit(submit_file)
        if external_job_id is None:
            log.debug("condor_submit failed for job %s: %s" %
                      (job_wrapper.get_id_tag(), message))
            if self.app.config.cleanup_job == "always":
                os.unlink(submit_file)
                cjs.cleanup()
            job_wrapper.fail("condor_submit failed", exception=True)
            return

        os.unlink(submit_file)

        log.info("(%s) queued as %s" % (galaxy_id_tag, external_job_id))

        # store runner information for tracking if Galaxy restarts
        job_wrapper.set_job_destination(job_destination, external_job_id)

        # Store DRM related state information for job
        cjs.job_id = external_job_id
        cjs.job_destination = job_destination

        # Add to our 'queue' of jobs to monitor
        self.monitor_queue.put(cjs)
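
For orientation, build_submit_description produces an HTCondor submit description along these lines; the paths are placeholders and any extra attributes Galaxy derives from query_params are omitted here:

# Hand-written approximation of an HTCondor submit description; not the exact
# output of build_submit_description.
submit_description = "\n".join([
    "universe = vanilla",
    "executable = /cluster/files/galaxy_42.sh",
    "output = /cluster/files/galaxy_42.o",
    "error = /cluster/files/galaxy_42.e",
    "log = /cluster/files/galaxy_42.condor.log",
    "request_cpus = 4",
    "queue",
])
print(submit_description)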
Example No. 43
    def index(self, trans, folder_id, **kwd):
        """
        GET /api/folders/{encoded_folder_id}/contents

        Displays a collection (list) of a folder's contents
        (files and folders). Encoded folder ID is prepended
        with 'F' if it is a folder as opposed to a data set
        which does not have it. Full path is provided in
        response as a separate object providing data for
        breadcrumb path building.

        :param  folder_id: encoded ID of the folder whose
            contents should be displayed
        :type   folder_id: encoded string

        :param kwd: keyword dictionary with other params
        :type  kwd: dict

        :returns: dictionary containing all items and metadata
        :rtype:   dict

        :raises: MalformedId, InconsistentDatabase, ObjectNotFound,
             InternalServerError
        """
        is_admin = trans.user_is_admin
        deleted = kwd.get('include_deleted', 'missing')
        current_user_roles = trans.get_current_user_roles()
        try:
            deleted = util.asbool(deleted)
        except ValueError:
            deleted = False

        decoded_folder_id = self.folder_manager.cut_and_decode(
            trans, folder_id)
        folder = self.folder_manager.get(trans, decoded_folder_id)

        # Special level of security on top of libraries.
        if trans.app.security_agent.can_access_library(
                current_user_roles, folder.parent_library) or is_admin:
            pass
        else:
            if trans.user:
                log.warning(
                    "SECURITY: User (id: %s) without proper access rights is trying to load folder with ID of %s"
                    % (trans.user.id, decoded_folder_id))
            else:
                log.warning(
                    "SECURITY: Anonymous user is trying to load restricted folder with ID of %s"
                    % (decoded_folder_id))
            raise exceptions.ObjectNotFound(
                'Folder with the id provided ( %s ) was not found' %
                str(folder_id))

        folder_contents = []
        update_time = ''
        create_time = ''
        #  Go through every accessible item (folders, datasets) in the folder and include its metadata.
        for content_item in self._load_folder_contents(trans, folder, deleted):
            return_item = {}
            encoded_id = trans.security.encode_id(content_item.id)
            update_time = content_item.update_time.strftime(
                "%Y-%m-%d %I:%M %p")
            create_time = content_item.create_time.strftime(
                "%Y-%m-%d %I:%M %p")

            if content_item.api_type == 'folder':
                encoded_id = 'F' + encoded_id
                can_modify = is_admin or (
                    trans.user
                    and trans.app.security_agent.can_modify_library_item(
                        current_user_roles, folder))
                can_manage = is_admin or (
                    trans.user
                    and trans.app.security_agent.can_manage_library_item(
                        current_user_roles, folder))
                return_item.update(
                    dict(can_modify=can_modify, can_manage=can_manage))
                if content_item.description:
                    return_item.update(
                        dict(description=content_item.description))

            elif content_item.api_type == 'file':
                #  Is the dataset public or private?
                #  When both are False the dataset is 'restricted'
                #  Access rights are checked on the dataset level, not on the ld or ldda level to maintain consistency
                dataset = content_item.library_dataset_dataset_association.dataset
                is_unrestricted = trans.app.security_agent.dataset_is_public(
                    dataset)
                if trans.user and trans.app.security_agent.dataset_is_private_to_user(
                        trans, dataset):
                    is_private = True
                else:
                    is_private = False

                # Can user manage the permissions on the dataset?
                can_manage = is_admin or (
                    trans.user and trans.app.security_agent.can_manage_dataset(
                        current_user_roles, content_item.
                        library_dataset_dataset_association.dataset))

                nice_size = util.nice_size(
                    int(content_item.library_dataset_dataset_association.
                        get_size()))

                library_dataset_dict = content_item.to_dict()
                encoded_ldda_id = trans.security.encode_id(
                    content_item.library_dataset_dataset_association.id)
                return_item.update(
                    dict(file_ext=library_dataset_dict['file_ext'],
                         date_uploaded=library_dataset_dict['date_uploaded'],
                         is_unrestricted=is_unrestricted,
                         is_private=is_private,
                         can_manage=can_manage,
                         state=library_dataset_dict['state'],
                         file_size=nice_size,
                         ldda_id=encoded_ldda_id))
                if content_item.library_dataset_dataset_association.message:
                    return_item.update(
                        dict(message=content_item.
                             library_dataset_dataset_association.message))

            # For every item include the default metadata
            return_item.update(
                dict(id=encoded_id,
                     type=content_item.api_type,
                     name=content_item.name,
                     update_time=update_time,
                     create_time=create_time,
                     deleted=content_item.deleted))
            folder_contents.append(return_item)

        # Return the reversed path so it starts with the library node.
        full_path = self.build_path(trans, folder)[::-1]

        # Check whether user can add items to the current folder
        can_add_library_item = is_admin or trans.app.security_agent.can_add_library_item(
            current_user_roles, folder)

        # Check whether user can modify the current folder
        can_modify_folder = is_admin or trans.app.security_agent.can_modify_library_item(
            current_user_roles, folder)

        parent_library_id = None
        if folder.parent_library is not None:
            parent_library_id = trans.security.encode_id(
                folder.parent_library.id)

        metadata = dict(full_path=full_path,
                        can_add_library_item=can_add_library_item,
                        can_modify_folder=can_modify_folder,
                        folder_name=folder.name,
                        folder_description=folder.description,
                        parent_library_id=parent_library_id)
        folder_container = dict(metadata=metadata,
                                folder_contents=folder_contents)
        return folder_container
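
A hypothetical client-side call against this endpoint, using a placeholder host, API key, and folder id (note the 'F' prefix described in the docstring):

import requests

GALAXY_URL = "https://galaxy.example.org"   # placeholder
API_KEY = "your-api-key"                    # placeholder
folder_id = "F1cd8e2f6b131e891"             # placeholder, 'F'-prefixed folder id

response = requests.get(
    "%s/api/folders/%s/contents" % (GALAXY_URL, folder_id),
    params={"key": API_KEY, "include_deleted": "false"},
)
payload = response.json()
for item in payload["folder_contents"]:
    print(item["type"], item["name"])
print(payload["metadata"]["folder_name"])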
Example No. 44
 def handle_repository_dependencies_container_entry( self, repository_dependencies_folder, rd_key, rd_value, folder_id,
                                                     repository_dependency_id, folder_keys ):
     repository_components_tuple = container_util.get_components_from_key( rd_key )
     components_list = suc.extract_components_from_tuple( repository_components_tuple )
     toolshed, repository_name, repository_owner, changeset_revision = components_list[ 0:4 ]
     # For backward compatibility to the 12/20/12 Galaxy release.
     if len( components_list ) == 4:
         prior_installation_required = 'False'
         only_if_compiling_contained_td = 'False'
     elif len( components_list ) == 5:
         prior_installation_required = components_list[ 4 ]
         only_if_compiling_contained_td = 'False'
     elif len( components_list ) == 6:
         prior_installation_required = components_list[ 4 ]
         only_if_compiling_contained_td = components_list[ 5 ]
     folder = self.get_folder( repository_dependencies_folder, rd_key )
     label = self.generate_repository_dependencies_folder_label_from_key( repository_name,
                                                                          repository_owner,
                                                                          changeset_revision,
                                                                          prior_installation_required,
                                                                          only_if_compiling_contained_td,
                                                                          repository_dependencies_folder.key )
     if folder:
         if rd_key not in folder_keys:
             folder_id += 1
             sub_folder = Folder( id=folder_id, key=rd_key, label=label, parent=folder )
             folder.folders.append( sub_folder )
         else:
             sub_folder = folder
     else:
         folder_id += 1
         sub_folder = Folder( id=folder_id, key=rd_key, label=label, parent=repository_dependencies_folder )
         repository_dependencies_folder.folders.append( sub_folder )
     if self.app.name == 'galaxy':
         # Insert a header row.
         repository_dependency_id += 1
         repository_dependency = RepositoryDependency( id=repository_dependency_id,
                                                       repository_name='Name',
                                                       changeset_revision='Revision',
                                                       repository_owner='Owner',
                                                       installation_status='Installation status' )
         # Insert the header row into the folder.
         sub_folder.repository_dependencies.append( repository_dependency )
     for repository_dependency in rd_value:
         if self.app.name == 'galaxy':
             tool_shed_repository_id, installation_status, repository_dependency = \
                 self.get_components_from_repository_dependency_for_installed_repository( repository_dependency )
         else:
             tool_shed_repository_id = None
             installation_status = None
         can_create_dependency = not self.is_subfolder_of( sub_folder, repository_dependency )
         if can_create_dependency:
             toolshed, repository_name, repository_owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
                 common_util.parse_repository_dependency_tuple( repository_dependency )
             repository_dependency_id += 1
             repository_dependency = RepositoryDependency( id=repository_dependency_id,
                                                           toolshed=toolshed,
                                                           repository_name=repository_name,
                                                           repository_owner=repository_owner,
                                                           changeset_revision=changeset_revision,
                                                           prior_installation_required=util.asbool( prior_installation_required ),
                                                           only_if_compiling_contained_td=util.asbool( only_if_compiling_contained_td ),
                                                           installation_status=installation_status,
                                                           tool_shed_repository_id=tool_shed_repository_id )
             # Insert the repository_dependency into the folder.
             sub_folder.repository_dependencies.append( repository_dependency )
     return repository_dependencies_folder, folder_id, repository_dependency_id
Example No. 45
    def load_from_element(self, elem, tool_path):
        assert elem.tag == 'data_manager', 'A data manager configuration must have a "data_manager" tag as the root. "%s" is present' % (
            elem.tag)
        self.declared_id = elem.get('id', None)
        self.guid = elem.get('guid', None)
        path = elem.get('tool_file', None)
        self.version = elem.get('version', self.version)
        tool_shed_repository_id = None
        tool_guid = None

        if path is None:
            tool_elem = elem.find('tool')
            assert tool_elem is not None, "Error loading tool for data manager. Make sure that a tool_file attribute or a tool tag set has been defined:\n%s" % (
                util.xml_to_string(elem))
            path = tool_elem.get("file", None)
            tool_guid = tool_elem.get("guid", None)
            # need to determine repository info so that dependencies will work correctly
            if hasattr(
                    self.data_managers.app, 'tool_cache'
            ) and tool_guid in self.data_managers.app.tool_cache._tool_paths_by_id:
                path = self.data_managers.app.tool_cache._tool_paths_by_id[
                    tool_guid]
                tool = self.data_managers.app.tool_cache.get_tool(path)
                tool_shed_repository = tool.tool_shed_repository
                self.tool_shed_repository_info_dict = dict(
                    tool_shed=tool_shed_repository.tool_shed,
                    name=tool_shed_repository.name,
                    owner=tool_shed_repository.owner,
                    installed_changeset_revision=tool_shed_repository.
                    installed_changeset_revision)
                tool_shed_repository_id = self.data_managers.app.security.encode_id(
                    tool_shed_repository.id)
                tool_path = ""
            else:
                tool_shed_url = tool_elem.find('tool_shed').text
                # Handle protocol changes.
                tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(
                    self.data_managers.app, tool_shed_url)
                # The protocol is not stored in the database.
                tool_shed = common_util.remove_protocol_from_tool_shed_url(
                    tool_shed_url)
                repository_name = tool_elem.find('repository_name').text
                repository_owner = tool_elem.find('repository_owner').text
                installed_changeset_revision = tool_elem.find(
                    'installed_changeset_revision').text
                self.tool_shed_repository_info_dict = dict(
                    tool_shed=tool_shed,
                    name=repository_name,
                    owner=repository_owner,
                    installed_changeset_revision=installed_changeset_revision)
                tool_shed_repository = \
                    repository_util.get_installed_repository(self.data_managers.app,
                                                             tool_shed=tool_shed,
                                                             name=repository_name,
                                                             owner=repository_owner,
                                                             installed_changeset_revision=installed_changeset_revision)
                if tool_shed_repository is None:
                    log.warning(
                        'Could not determine tool shed repository from database. This should only ever happen when running tests.'
                    )
                    # we'll set tool_path manually here from shed_conf_file
                    tool_shed_repository_id = None
                    try:
                        tool_path = util.parse_xml(
                            elem.get('shed_conf_file')).getroot().get(
                                'tool_path', tool_path)
                    except Exception as e:
                        log.error(
                            'Error determining tool_path for Data Manager during testing: %s',
                            e)
                else:
                    tool_shed_repository_id = self.data_managers.app.security.encode_id(
                        tool_shed_repository.id)
                # use shed_conf_file to determine tool_path
                shed_conf_file = elem.get("shed_conf_file", None)
                if shed_conf_file:
                    shed_conf = self.data_managers.app.toolbox.get_shed_config_dict_by_filename(
                        shed_conf_file, None)
                    if shed_conf:
                        tool_path = shed_conf.get("tool_path", tool_path)
        assert path is not None, "A tool file path could not be determined:\n%s" % (
            util.xml_to_string(elem))
        self.load_tool(os.path.join(tool_path, path),
                       guid=tool_guid,
                       data_manager_id=self.id,
                       tool_shed_repository_id=tool_shed_repository_id)
        self.name = elem.get('name', self.tool.name)
        self.description = elem.get('description', self.tool.description)
        self.undeclared_tables = util.asbool(
            elem.get('undeclared_tables', self.undeclared_tables))

        for data_table_elem in elem.findall('data_table'):
            data_table_name = data_table_elem.get("name")
            assert data_table_name is not None, "A name is required for a data table entry"
            if data_table_name not in self.data_tables:
                self.data_tables[data_table_name] = odict()
            output_elem = data_table_elem.find('output')
            if output_elem is not None:
                for column_elem in output_elem.findall('column'):
                    column_name = column_elem.get('name', None)
                    assert column_name is not None, "Name is required for column entry"
                    data_table_coumn_name = column_elem.get(
                        'data_table_name', column_name)
                    self.data_tables[data_table_name][
                        data_table_coumn_name] = column_name
                    output_ref = column_elem.get('output_ref', None)
                    if output_ref is not None:
                        if data_table_name not in self.output_ref_by_data_table:
                            self.output_ref_by_data_table[data_table_name] = {}
                        self.output_ref_by_data_table[data_table_name][
                            data_table_coumn_name] = output_ref
                    value_translation_elems = column_elem.findall(
                        'value_translation')
                    if value_translation_elems is not None:
                        for value_translation_elem in value_translation_elems:
                            value_translation = value_translation_elem.text
                            if value_translation is not None:
                                value_translation_type = value_translation_elem.get(
                                    'type', DEFAULT_VALUE_TRANSLATION_TYPE)
                                if data_table_name not in self.value_translation_by_data_table_column:
                                    self.value_translation_by_data_table_column[
                                        data_table_name] = {}
                                if data_table_coumn_name not in self.value_translation_by_data_table_column[
                                        data_table_name]:
                                    self.value_translation_by_data_table_column[
                                        data_table_name][
                                            data_table_coumn_name] = []
                                if value_translation_type == 'function':
                                    if value_translation in VALUE_TRANSLATION_FUNCTIONS:
                                        value_translation = VALUE_TRANSLATION_FUNCTIONS[
                                            value_translation]
                                    else:
                                        raise ValueError(
                                            "Unsupported value translation function: '%s'"
                                            % (value_translation))
                                else:
                                    assert value_translation_type == DEFAULT_VALUE_TRANSLATION_TYPE, ValueError(
                                        "Unsupported value translation type: '%s'"
                                        % (value_translation_type))
                                self.value_translation_by_data_table_column[
                                    data_table_name][
                                        data_table_coumn_name].append(
                                            value_translation)

                    for move_elem in column_elem.findall('move'):
                        move_type = move_elem.get('type', 'directory')
                        relativize_symlinks = move_elem.get(
                            'relativize_symlinks', False
                        )  # TODO: should we instead always relativize links?
                        source_elem = move_elem.find('source')
                        if source_elem is None:
                            source_base = None
                            source_value = ''
                        else:
                            source_base = source_elem.get('base', None)
                            source_value = source_elem.text
                        target_elem = move_elem.find('target')
                        if target_elem is None:
                            target_base = None
                            target_value = ''
                        else:
                            target_base = target_elem.get('base', None)
                            target_value = target_elem.text
                        if data_table_name not in self.move_by_data_table_column:
                            self.move_by_data_table_column[
                                data_table_name] = {}
                        self.move_by_data_table_column[data_table_name][data_table_coumn_name] = \
                            dict(type=move_type,
                                 source_base=source_base,
                                 source_value=source_value,
                                 target_base=target_base,
                                 target_value=target_value,
                                 relativize_symlinks=relativize_symlinks)
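
The XML structure this loader expects can be read off from its attribute and tag lookups (tool_file, data_table, output, column, value_translation, move). A hypothetical minimal definition, parsed with the standard library purely for illustration:

import xml.etree.ElementTree as ET

# Hypothetical data manager definition; ids, names and paths are placeholders.
DATA_MANAGER_XML = """
<data_manager id="example_fetcher" version="0.0.1" tool_file="data_manager/fetcher.xml">
    <data_table name="all_fasta">
        <output>
            <column name="value"/>
            <column name="path" output_ref="out_file">
                <move type="file">
                    <source>${path}</source>
                    <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">fasta/${value}.fa</target>
                </move>
            </column>
        </output>
    </data_table>
</data_manager>
"""

elem = ET.fromstring(DATA_MANAGER_XML)
print(elem.get("id"), elem.get("tool_file"))
for column in elem.find("data_table").find("output").findall("column"):
    print("column:", column.get("name"))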
Example No. 46
def app_factory(global_conf, **kwargs):
    """Return a wsgi application serving the root object"""
    # Create the Galaxy tool shed application unless passed in
    if 'app' in kwargs:
        app = kwargs.pop('app')
    else:
        try:
            from galaxy.webapps.tool_shed.app import UniverseApplication
            app = UniverseApplication(global_conf=global_conf, **kwargs)
        except Exception:
            import traceback, sys
            traceback.print_exc()
            sys.exit(1)
    atexit.register(app.shutdown)
    # Create the universe WSGI application
    webapp = CommunityWebApplication(app,
                                     session_cookie='galaxycommunitysession',
                                     name="tool_shed")
    add_ui_controllers(webapp, app)
    webapp.add_route('/view/{owner}',
                     controller='repository',
                     action='sharable_owner')
    webapp.add_route('/view/{owner}/{name}',
                     controller='repository',
                     action='sharable_repository')
    webapp.add_route('/view/{owner}/{name}/{changeset_revision}',
                     controller='repository',
                     action='sharable_repository_revision')
    # Handle displaying tool help images and README file images for tools contained in repositories.
    webapp.add_route('/repository/static/images/:repository_id/:image_file',
                     controller='repository',
                     action='display_image_in_repository',
                     repository_id=None,
                     image_file=None)
    webapp.add_route('/:controller/:action', action='index')
    webapp.add_route('/:action', controller='repository', action='index')
    webapp.add_route('/repos/*path_info',
                     controller='hg',
                     action='handle_request',
                     path_info='/')
    # Add the web API.  # A good resource for RESTful services - http://routes.readthedocs.org/en/latest/restful.html
    webapp.add_api_controllers('galaxy.webapps.tool_shed.api', app)
    webapp.mapper.connect('api_key_retrieval',
                          '/api/authenticate/baseauth/',
                          controller='authenticate',
                          action='get_tool_shed_api_key',
                          conditions=dict(method=["GET"]))
    webapp.mapper.connect('repo_search',
                          '/api/search/',
                          controller='search',
                          action='search',
                          conditions=dict(method=["GET"]))
    webapp.mapper.resource('category',
                           'categories',
                           controller='categories',
                           name_prefix='category_',
                           path_prefix='/api',
                           parent_resources=dict(member_name='category',
                                                 collection_name='categories'))
    webapp.mapper.resource('repository',
                           'repositories',
                           controller='repositories',
                           collection={
                               'add_repository_registry_entry': 'POST',
                               'get_repository_revision_install_info': 'GET',
                               'get_ordered_installable_revisions': 'GET',
                               'remove_repository_registry_entry': 'POST',
                               'repository_ids_for_setting_metadata': 'GET',
                               'reset_metadata_on_repositories': 'POST',
                               'reset_metadata_on_repository': 'POST'
                           },
                           name_prefix='repository_',
                           path_prefix='/api',
                           new={'import_capsule': 'POST'},
                           parent_resources=dict(
                               member_name='repository',
                               collection_name='repositories'))
    webapp.mapper.resource('repository_revision',
                           'repository_revisions',
                           member={
                               'repository_dependencies': 'GET',
                               'export': 'POST'
                           },
                           controller='repository_revisions',
                           name_prefix='repository_revision_',
                           path_prefix='/api',
                           parent_resources=dict(
                               member_name='repository_revision',
                               collection_name='repository_revisions'))
    webapp.mapper.resource('user',
                           'users',
                           controller='users',
                           name_prefix='user_',
                           path_prefix='/api',
                           parent_resources=dict(member_name='user',
                                                 collection_name='users'))
    webapp.mapper.connect('repository_create_changeset_revision',
                          '/api/repositories/:id/changeset_revision',
                          controller='repositories',
                          action='create_changeset_revision',
                          conditions=dict(method=["POST"]))
    webapp.mapper.connect('create_repository',
                          '/api/repositories',
                          controller='repositories',
                          action='create',
                          conditions=dict(method=["POST"]))

    webapp.finalize_config()
    # Wrap the webapp in some useful middleware
    if kwargs.get('middleware', True):
        webapp = wrap_in_middleware(webapp, global_conf, **kwargs)
    if asbool(kwargs.get('static_enabled', True)):
        webapp = wrap_in_static(webapp, global_conf, **kwargs)
    # Close any pooled database connections before forking
    try:
        galaxy.webapps.tool_shed.model.mapping.metadata.engine.connection_provider._pool.dispose(
        )
    except Exception:
        pass
    # Return
    return webapp
Example No. 47
 def __init__(self, environ, app, webapp):
     self.app = app
     self.webapp = webapp
     self.security = webapp.security
     galaxy.web.framework.base.DefaultWebTransaction.__init__(self, environ)
     self.debug = asbool(self.app.config.get('debug', False))
Example No. 48
    def index(self, trans, **kwd):
        """
        index( self, trans, **kwd )
        * GET /api/libraries:
            Returns a list of summary data for all libraries.

        :param  deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted``
        :type   deleted: boolean (optional)

        :returns:   list of dictionaries containing library information
        :rtype:     list

        .. seealso:: :attr:`galaxy.model.Library.dict_collection_visible_keys`

        """
        query = trans.sa_session.query(trans.app.model.Library)
        deleted = kwd.get('deleted', 'missing')
        try:
            if not trans.user_is_admin():
                # non-admins can't see deleted libraries
                deleted = False
            else:
                deleted = util.asbool(deleted)
            if deleted:
                query = query.filter(
                    trans.app.model.Library.table.c.deleted == True)
            else:
                query = query.filter(
                    trans.app.model.Library.table.c.deleted == False)
        except ValueError:
            # given value wasn't true/false but the user is admin so we don't filter on this parameter at all
            pass

        current_user_role_ids = [
            role.id for role in trans.get_current_user_roles()
        ]
        library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
        restricted_library_ids = [
            lp.library_id for lp in (
                trans.sa_session.query(trans.model.LibraryPermissions).filter(
                    trans.model.LibraryPermissions.table.c.action ==
                    library_access_action).distinct())
        ]
        accessible_restricted_library_ids = [
            lp.library_id for lp in (
                trans.sa_session.query(trans.model.LibraryPermissions).filter(
                    and_(
                        trans.model.LibraryPermissions.table.c.action ==
                        library_access_action,
                        trans.model.LibraryPermissions.table.c.role_id.in_(
                            current_user_role_ids))))
        ]
        query = query.filter(
            or_(
                not_(trans.model.Library.table.c.id.in_(
                    restricted_library_ids)),
                trans.model.Library.table.c.id.in_(
                    accessible_restricted_library_ids)))
        libraries = []
        for library in query:
            item = library.to_dict(view='element',
                                   value_mapper={
                                       'id': trans.security.encode_id,
                                       'root_folder_id':
                                       trans.security.encode_id
                                   })
            if trans.app.security_agent.library_is_public(library,
                                                          contents=False):
                item['public'] = True
            current_user_roles = trans.get_current_user_roles()
            if not trans.user_is_admin():
                item[
                    'can_user_add'] = trans.app.security_agent.can_add_library_item(
                        current_user_roles, library)
                item[
                    'can_user_modify'] = trans.app.security_agent.can_modify_library_item(
                        current_user_roles, library)
                item[
                    'can_user_manage'] = trans.app.security_agent.can_manage_library_item(
                        current_user_roles, library)
            else:
                item['can_user_add'] = True
                item['can_user_modify'] = True
                item['can_user_manage'] = True
            libraries.append(item)
        return libraries
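
A hypothetical client-side call against this endpoint; host and key are placeholders, and 'deleted=true' only has an effect for admin users, as implemented above:

import requests

GALAXY_URL = "https://galaxy.example.org"  # placeholder
API_KEY = "your-api-key"                   # placeholder

response = requests.get(
    "%s/api/libraries" % GALAXY_URL,
    params={"key": API_KEY, "deleted": "false"},
)
for library in response.json():
    print(library["id"], library["name"], library.get("can_user_add"))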
Example No. 49
    def __init__(self, environ: Dict[str, Any], app, webapp, session_cookie=None) -> None:
        self._app = app
        self.webapp = webapp
        self.user_manager = app[UserManager]
        self.session_manager = app[GalaxySessionManager]
        base.DefaultWebTransaction.__init__(self, environ)
        self.setup_i18n()
        self.expunge_all()
        config = self.app.config
        self.debug = asbool(config.get('debug', False))
        x_frame_options = getattr(config, 'x_frame_options', None)
        if x_frame_options:
            self.response.headers['X-Frame-Options'] = x_frame_options
        # Flag indicating whether we are in workflow building mode (means
        # that the current history should not be used for parameter values
        # and such).
        self.workflow_building_mode = False
        self.__user = None
        self.galaxy_session = None
        self.error_message = None
        self.host = self.request.host

        # set any cross origin resource sharing headers if configured to do so
        self.set_cors_headers()

        if self.environ.get('is_api_request', False):
            # With API requests, if there's a key, use it and associate the
            # user with the transaction.
            # If not, check for an active session but do not create one.
            # If an error message is set here, it's sent back using
            # trans.show_error in the response -- in expose_api.
            self.error_message = self._authenticate_api(session_cookie)
        elif self.app.name == "reports":
            self.galaxy_session = None
        else:
            # This is a web request, get or create session.
            self._ensure_valid_session(session_cookie)
        if self.galaxy_session:
            # When we've authenticated by session, we have to check the
            # following.
            # Prevent deleted users from accessing Galaxy
            if config.use_remote_user and self.galaxy_session.user.deleted:
                self.response.send_redirect(url_for('/static/user_disabled.html'))
            if config.require_login:
                self._ensure_logged_in_user(environ, session_cookie)
            if config.session_duration:
                # TODO DBTODO All ajax calls from the client need to go through
                # a single point of control where we can do things like
                # redirect/etc.  This is API calls as well as something like 40
                # @web.json requests that might not get handled well on the
                # clientside.
                #
                # Make sure we're not past the duration, and either log out or
                # update timestamp.
                now = datetime.datetime.now()
                if self.galaxy_session.last_action:
                    expiration_time = self.galaxy_session.last_action + datetime.timedelta(minutes=config.session_duration)
                else:
                    expiration_time = now
                    self.galaxy_session.last_action = now - datetime.timedelta(seconds=1)
                    self.sa_session.add(self.galaxy_session)
                    self.sa_session.flush()
                if expiration_time < now:
                    # Expiration time has passed.
                    self.handle_user_logout()
                    if self.environ.get('is_api_request', False):
                        self.response.status = 401
                        self.user = None
                        self.galaxy_session = None
                    else:
                        self.response.send_redirect(url_for(controller='root',
                                                     action='login',
                                                     message="You have been logged out due to inactivity.  Please log in again to continue using Galaxy.",
                                                     status='info',
                                                     use_panels=True))
                else:
                    self.galaxy_session.last_action = now
                    self.sa_session.add(self.galaxy_session)
                    self.sa_session.flush()
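
The expiry check above reduces to plain datetime arithmetic; a standalone sketch, with session_duration (in minutes) as a placeholder value:

import datetime

session_duration = 60  # minutes; placeholder value
last_action = datetime.datetime.now() - datetime.timedelta(minutes=90)

now = datetime.datetime.now()
expiration_time = last_action + datetime.timedelta(minutes=session_duration)
if expiration_time < now:
    print("session expired; the user would be logged out")
else:
    print("session still valid; last_action would be refreshed to now")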
Example No. 50
def wrap_in_middleware(app, global_conf, **local_conf):
    """
    Based on the configuration wrap `app` in a set of common and useful
    middleware.
    """
    webapp = app

    # Merge the global and local configurations
    conf = global_conf.copy()
    conf.update(local_conf)
    debug = asbool(conf.get('debug', False))
    # First put into place httpexceptions, which must be most closely
    # wrapped around the application (it can interact poorly with
    # other middleware):
    app = httpexceptions.make_middleware(app, conf)
    log.debug("Enabling 'httpexceptions' middleware")
    # Statsd request timing and profiling
    statsd_host = conf.get('statsd_host', None)
    if statsd_host:
        from galaxy.web.framework.middleware.statsd import StatsdMiddleware
        app = StatsdMiddleware(app, statsd_host, conf.get('statsd_port', 8125),
                               conf.get('statsd_prefix', 'galaxy'))
        log.debug("Enabling 'statsd' middleware")
    # If we're using remote_user authentication, add middleware that
    # protects Galaxy from improperly configured authentication in the
    # upstream server
    single_user = conf.get('single_user', None)
    use_remote_user = asbool(conf.get('use_remote_user', False)) or single_user
    if use_remote_user:
        from galaxy.web.framework.middleware.remoteuser import RemoteUser
        app = RemoteUser(
            app,
            maildomain=conf.get('remote_user_maildomain', None),
            display_servers=util.listify(conf.get('display_servers', '')),
            single_user=single_user,
            admin_users=conf.get('admin_users', '').split(','),
            remote_user_header=conf.get('remote_user_header',
                                        'HTTP_REMOTE_USER'),
            remote_user_secret_header=conf.get('remote_user_secret', None),
            normalize_remote_user_email=conf.get('normalize_remote_user_email',
                                                 False))
    # The recursive middleware allows for including requests in other
    # requests or forwarding of requests, all on the server side.
    if asbool(conf.get('use_recursive', True)):
        from paste import recursive
        app = recursive.RecursiveMiddleware(app, conf)
        log.debug("Enabling 'recursive' middleware")
    # If Sentry logging is enabled, log here before propagating up to
    # the error middleware.
    sentry_dsn = conf.get('sentry_dsn', None)
    if sentry_dsn:
        from galaxy.web.framework.middleware.sentry import Sentry
        app = Sentry(app, sentry_dsn)
    # Various debug middleware that can only be turned on if the debug
    # flag is set, either because they are insecure or greatly hurt
    # performance
    if debug:
        # Middleware to check for WSGI compliance
        if asbool(conf.get('use_lint', False)):
            from paste import lint
            app = lint.make_middleware(app, conf)
            log.debug("Enabling 'lint' middleware")
        # Middleware to run the python profiler on each request
        if asbool(conf.get('use_profile', False)):
            from paste.debug import profile
            app = profile.ProfileMiddleware(app, conf)
            log.debug("Enabling 'profile' middleware")
    if debug and asbool(conf.get('use_interactive',
                                 False)) and not process_is_uwsgi:
        # Interactive exception debugging is dangerous if publicly
        # accessible; if it is not enabled we'll use the regular error
        # printing middleware.
        from weberror import evalexception
        app = evalexception.EvalException(
            app, conf, templating_formatters=build_template_error_formatters())
        log.debug("Enabling 'eval exceptions' middleware")
    else:
        if debug and asbool(conf.get('use_interactive',
                                     False)) and process_is_uwsgi:
            log.error(
                "Interactive debugging middleware is enabled in your configuration "
                "but this is a uwsgi process.  Refusing to wrap in interactive error middleware."
            )
        # Not in interactive debug mode, just use the regular error middleware
        import galaxy.web.framework.middleware.error
        app = galaxy.web.framework.middleware.error.ErrorMiddleware(app, conf)
        log.debug("Enabling 'error' middleware")
    # Transaction logging (apache access.log style)
    if asbool(conf.get('use_translogger', True)):
        from galaxy.web.framework.middleware.translogger import TransLogger
        app = TransLogger(app)
        log.debug("Enabling 'trans logger' middleware")
    # X-Forwarded-Host handling
    from galaxy.web.framework.middleware.xforwardedhost import XForwardedHostMiddleware
    app = XForwardedHostMiddleware(app)
    log.debug("Enabling 'x-forwarded-host' middleware")
    # Request ID middleware
    from galaxy.web.framework.middleware.request_id import RequestIDMiddleware
    app = RequestIDMiddleware(app)
    log.debug("Enabling 'Request ID' middleware")

    # api batch call processing middleware
    from galaxy.web.framework.middleware.batch import BatchMiddleware
    app = BatchMiddleware(webapp, app, {})
    log.debug("Enabling 'Batch' middleware")

    return app
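Each `app = SomeMiddleware(app, ...)` assignment above wraps the previous WSGI callable, so the wrapper applied last is the first to see an incoming request. A minimal standalone sketch of that pattern with a toy middleware (nothing below is Galaxy's API):

class ToyLoggingMiddleware(object):
    # Toy WSGI middleware: print the request path, then delegate to the wrapped app.
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        print("request: %s" % environ.get("PATH_INFO", "/"))
        return self.app(environ, start_response)


def simple_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]


# Same shape as wrap_in_middleware(): keep reassigning `app` to the new wrapper.
app = simple_app
app = ToyLoggingMiddleware(app)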
Esempio n. 51
0
 def get_required_repo_info_dicts(self, tool_shed_url, repo_info_dicts):
     """
     Inspect the list of repo_info_dicts for repository dependencies and append a repo_info_dict for each of
     them to the list.  All repository_dependency entries in each of the received repo_info_dicts include
     all required repositories, so only one pass through this method is required to retrieve all repository
     dependencies.
     """
     all_required_repo_info_dict = {}
     all_repo_info_dicts = []
     if repo_info_dicts:
         # We'll send tuples of ( tool_shed, repository_name, repository_owner, changeset_revision ) to the tool
         # shed to discover repository ids.
         required_repository_tups = []
         for repo_info_dict in repo_info_dicts:
             if repo_info_dict not in all_repo_info_dicts:
                 all_repo_info_dicts.append(repo_info_dict)
             for repository_name, repo_info_tup in repo_info_dict.items():
                 description, \
                     repository_clone_url, \
                     changeset_revision, \
                     ctx_rev, \
                     repository_owner, \
                     repository_dependencies, \
                     tool_dependencies = \
                     repository_util.get_repo_info_tuple_contents(repo_info_tup)
                 if repository_dependencies:
                     for key, val in repository_dependencies.items():
                         if key in ['root_key', 'description']:
                             continue
                         repository_components_tuple = get_components_from_key(
                             key)
                         components_list = repository_util.extract_components_from_tuple(
                             repository_components_tuple)
                         # Skip listing a repository dependency if it is required only to compile a tool dependency
                         # defined for the dependent repository since in this case, the repository dependency is really
                         # a dependency of the dependent repository's contained tool dependency, and only if that
                         # tool dependency requires compilation.
                         # For backward compatibility to the 12/20/12 Galaxy release.
                         only_if_compiling_contained_td = 'False'
                         if len(components_list) == 4:
                             only_if_compiling_contained_td = 'False'
                         elif len(components_list) == 5:
                             only_if_compiling_contained_td = 'False'
                         if not asbool(only_if_compiling_contained_td):
                             if components_list not in required_repository_tups:
                                 required_repository_tups.append(
                                     components_list)
                         for components_list in val:
                             try:
                                 only_if_compiling_contained_td = components_list[
                                     5]
                             except IndexError:
                                 only_if_compiling_contained_td = 'False'
                             # Skip listing a repository dependency if it is required only to compile a tool dependency
                             # defined for the dependent repository (see above comment).
                             if not asbool(only_if_compiling_contained_td):
                                 if components_list not in required_repository_tups:
                                     required_repository_tups.append(
                                         components_list)
                 else:
                     # We have a single repository with no dependencies.
                     components_list = [
                         tool_shed_url, repository_name, repository_owner,
                         changeset_revision
                     ]
                     required_repository_tups.append(components_list)
             if required_repository_tups:
                 # The value of required_repository_tups is a list of tuples, so we need to encode it.
                 encoded_required_repository_tups = []
                 for required_repository_tup in required_repository_tups:
                     # Convert every item in required_repository_tup to a string.
                     required_repository_tup = [
                         str(item) for item in required_repository_tup
                     ]
                     encoded_required_repository_tups.append(
                         encoding_util.encoding_sep.join(
                             required_repository_tup))
                 encoded_required_repository_str = encoding_util.encoding_sep2.join(
                     encoded_required_repository_tups)
                 encoded_required_repository_str = encoding_util.tool_shed_encode(
                     encoded_required_repository_str)
                 if repository_util.is_tool_shed_client(self.app):
                     # Handle secure / insecure Tool Shed URL protocol changes and port changes.
                     tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(
                         self.app, tool_shed_url)
                 pathspec = ['repository', 'get_required_repo_info_dict']
                 url = build_url(tool_shed_url, pathspec=pathspec)
                 # Fix for handling 307 redirect not being handled nicely by urlopen() when the Request() has data provided
                 try:
                     url = _urlopen(url).geturl()
                 except HTTPError as e:
                     if e.code == 502:
                         pass
                     else:
                         raise
                 payload = urlencode(
                     dict(encoded_str=encoded_required_repository_str))
                 response = _urlopen(url, payload).read()
                 if response:
                     try:
                         required_repo_info_dict = json.loads(
                             unicodify(response))
                     except Exception as e:
                         log.exception(e)
                         return all_repo_info_dicts
                     required_repo_info_dicts = []
                     for k, v in required_repo_info_dict.items():
                         if k == 'repo_info_dicts':
                             encoded_dict_strings = required_repo_info_dict[
                                 'repo_info_dicts']
                             for encoded_dict_str in encoded_dict_strings:
                                 decoded_dict = encoding_util.tool_shed_decode(
                                     encoded_dict_str)
                                 required_repo_info_dicts.append(
                                     decoded_dict)
                         else:
                             if k not in all_required_repo_info_dict:
                                 all_required_repo_info_dict[k] = v
                             else:
                                 if v and not all_required_repo_info_dict[k]:
                                     all_required_repo_info_dict[k] = v
                         if required_repo_info_dicts:
                             for required_repo_info_dict in required_repo_info_dicts:
                                 # Each required_repo_info_dict has a single entry, and all_repo_info_dicts is a list
                                 # of dictionaries, each of which has a single entry.  We'll check keys here rather than
                                 # the entire dictionary because a dictionary entry in all_repo_info_dicts will include
                                 # lists of discovered repository dependencies, but these lists will be empty in the
                                 # required_repo_info_dict since dependency discovery has not yet been performed for these
                                 # dictionaries.
                                 required_repo_info_dict_key = next(
                                     iter(required_repo_info_dict))
                                 all_repo_info_dicts_keys = [
                                     next(iter(d))
                                     for d in all_repo_info_dicts
                                 ]
                                 if required_repo_info_dict_key not in all_repo_info_dicts_keys:
                                     all_repo_info_dicts.append(
                                         required_repo_info_dict)
                                 else:
                                     # required_repo_info_dict_key corresponds to the repo name.
                                     # A single install transaction might require the installation of 2 or more repos
                                     # with the same repo name but different owners or versions.
                                     # Therefore, if required_repo_info_dict_key is already in all_repo_info_dicts,
                                     # check that the tool id is already present. If it is not, we are dealing with the same repo name,
                                     # but a different owner/changeset revision or version and we add the repo to the list of repos to be installed.
                                     tool_id = required_repo_info_dict[
                                         required_repo_info_dict_key][1]
                                     is_present = False
                                     for repo_info_dict in all_repo_info_dicts:
                                         for k, v in repo_info_dict.items():
                                             if required_repo_info_dict_key == k:
                                                 if tool_id == v[1]:
                                                     is_present = True
                                                     break
                                     if not is_present:
                                         all_repo_info_dicts.append(
                                             required_repo_info_dict)
                     all_required_repo_info_dict[
                         'all_repo_info_dicts'] = all_repo_info_dicts
     return all_required_repo_info_dict
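The encoding step above joins each repository tuple's fields with encoding_sep, joins the encoded tuples with encoding_sep2, and then passes the result to tool_shed_encode(). A minimal sketch of the same join/split idea with placeholder separators (the real separator values live in Galaxy's encoding_util):

FIELD_SEP = "__esep__"    # placeholder, not Galaxy's actual separator
TUPLE_SEP = "__esepii__"  # placeholder

def encode_repository_tuples(tuples):
    # Join the fields of each tuple, then join the encoded tuples.
    return TUPLE_SEP.join(FIELD_SEP.join(str(field) for field in tup) for tup in tuples)

def decode_repository_tuples(encoded):
    return [chunk.split(FIELD_SEP) for chunk in encoded.split(TUPLE_SEP)]

encoded = encode_repository_tuples([
    ("https://toolshed.example.org", "fastqc", "devteam", "abc123def456"),
])
assert decode_repository_tuples(encoded)[0][1] == "fastqc"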
Esempio n. 52
0
    def containerize_command(self, command):
        def prop(name, default):
            destination_name = "docker_%s" % name
            return self.destination_info.get(destination_name, default)

        env_directives = []
        for pass_through_var in self.tool_info.env_pass_through:
            env_directives.append('"%s=$%s"' %
                                  (pass_through_var, pass_through_var))

        # Allow destinations to explicitly set environment variables just for the
        # docker container. A better approach, however, would be to set them for the
        # destination and pass through only what the tool needs. (See TODO in ToolInfo.)
        for key, value in six.iteritems(self.destination_info):
            if key.startswith("docker_env_"):
                env = key[len("docker_env_"):]
                env_directives.append('"%s=%s"' % (env, value))

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception(
                "Cannot containerize command [%s] without defined working directory."
                % working_directory)

        volumes_raw = self._expand_volume_str(
            self.destination_info.get("docker_volumes", "$defaults"))
        preprocessed_volumes_list = preprocess_volumes(volumes_raw,
                                                       self.container_type)
        # TODO: Remove redundant volumes...
        volumes = [DockerVolume.from_str(v) for v in preprocessed_volumes_list]
        # If the tool has a temp directory available, mount it as /tmp in the container for
        # compatibility with CWL. This is part of that spec and should make it easier to share
        # containers between CWL and Galaxy.
        if self.job_info.tmp_directory is not None:
            volumes.append(
                DockerVolume.from_str("%s:/tmp:rw" %
                                      self.job_info.tmp_directory))
        volumes_from = self.destination_info.get(
            "docker_volumes_from", docker_util.DEFAULT_VOLUMES_FROM)

        docker_host_props = dict(
            docker_cmd=prop("cmd", docker_util.DEFAULT_DOCKER_COMMAND),
            sudo=asbool(prop("sudo", docker_util.DEFAULT_SUDO)),
            sudo_cmd=prop("sudo_cmd", docker_util.DEFAULT_SUDO_COMMAND),
            host=prop("host", docker_util.DEFAULT_HOST),
        )

        cached_image_file = self.__get_cached_image_file()
        if not cached_image_file:
            # TODO: Add option to cache it once here and create cached_image_file.
            cache_command = docker_util.build_docker_cache_command(
                self.container_id, **docker_host_props)
        else:
            cache_command = self.__cache_from_file_command(
                cached_image_file, docker_host_props)
        run_command = docker_util.build_docker_run_command(
            command,
            self.container_id,
            volumes=volumes,
            volumes_from=volumes_from,
            env_directives=env_directives,
            working_directory=working_directory,
            net=prop(
                "net",
                "none"),  # By default, docker instance has networking disabled
            auto_rm=asbool(prop("auto_rm", docker_util.DEFAULT_AUTO_REMOVE)),
            set_user=prop("set_user", docker_util.DEFAULT_SET_USER),
            run_extra_arguments=prop("run_extra_arguments",
                                     docker_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
            **docker_host_props)
        return "%s\n%s" % (cache_command, run_command)
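The returned script is ultimately a `docker run` invocation assembled from the volumes, environment directives, and working directory gathered above. A rough standalone approximation of that assembly (a toy helper, not Galaxy's docker_util):

def toy_docker_run_command(command, image, volumes=(), env_directives=(), working_directory="/tmp"):
    # Rough approximation of what build_docker_run_command() stitches together.
    parts = ["docker", "run", "--rm", "-w", working_directory]
    for volume in volumes:
        parts.extend(["-v", volume])        # e.g. "/jobs/1:/jobs/1:rw"
    for directive in env_directives:
        parts.extend(["-e", directive])     # e.g. '"GALAXY_SLOTS=4"'
    parts.extend([image, command])
    return " ".join(parts)

print(toy_docker_run_command("bash tool_script.sh",
                             "quay.io/biocontainers/samtools:1.9",
                             volumes=["/jobs/1:/jobs/1:rw"],
                             env_directives=['"GALAXY_SLOTS=4"']))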
Esempio n. 53
0
 def is_latest_installable_revision(self):
     if self.tool_shed_status:
         return asbool(
             self.tool_shed_status.get('latest_installable_revision',
                                       False))
     return False
Esempio n. 54
0
 def _quote(self, tool_id, **kwd):
     if asbool(kwd.get("tool_id_b64_encoded", False)):
         import base64
         tool_id = base64.b64decode(tool_id)
     tool_id = urllib.parse.quote_plus(tool_id)
     return tool_id
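A quick usage sketch of the helper above with an illustrative tool id (in Python 3 the b64decode branch yields bytes, which quote_plus accepts):

import base64
import urllib.parse

tool_id = "toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.72"
encoded = base64.b64encode(tool_id.encode("utf-8"))
# Mirrors _quote(tool_id, tool_id_b64_encoded=True):
print(urllib.parse.quote_plus(base64.b64decode(encoded)))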
Esempio n. 55
0
 def check_python_openid(self):
     return asbool(self.config["enable_openid"])
Esempio n. 56
0
 def get_required_repo_info_dicts( self, tool_shed_url, repo_info_dicts ):
     """
     Inspect the list of repo_info_dicts for repository dependencies and append a repo_info_dict for each of
     them to the list.  All repository_dependency entries in each of the received repo_info_dicts include
     all required repositories, so only one pass through this method is required to retrieve all repository
     dependencies.
     """
     all_required_repo_info_dict = {}
     all_repo_info_dicts = []
     if repo_info_dicts:
         # We'll send tuples of ( tool_shed, repository_name, repository_owner, changeset_revision ) to the tool
         # shed to discover repository ids.
         required_repository_tups = []
         for repo_info_dict in repo_info_dicts:
             if repo_info_dict not in all_repo_info_dicts:
                 all_repo_info_dicts.append( repo_info_dict )
             for repository_name, repo_info_tup in repo_info_dict.items():
                 description, \
                     repository_clone_url, \
                     changeset_revision, \
                     ctx_rev, \
                     repository_owner, \
                     repository_dependencies, \
                     tool_dependencies = \
                     suc.get_repo_info_tuple_contents( repo_info_tup )
                 if repository_dependencies:
                     for key, val in repository_dependencies.items():
                         if key in [ 'root_key', 'description' ]:
                             continue
                         repository_components_tuple = container_util.get_components_from_key( key )
                         components_list = suc.extract_components_from_tuple( repository_components_tuple )
                         # Skip listing a repository dependency if it is required only to compile a tool dependency
                         # defined for the dependent repository since in this case, the repository dependency is really
                         # a dependency of the dependent repository's contained tool dependency, and only if that
                         # tool dependency requires compilation.
                         # For backward compatibility to the 12/20/12 Galaxy release.
                         only_if_compiling_contained_td = 'False'
                         if len( components_list ) == 4:
                             only_if_compiling_contained_td = 'False'
                         elif len( components_list ) == 5:
                             only_if_compiling_contained_td = 'False'
                         if not asbool( only_if_compiling_contained_td ):
                             if components_list not in required_repository_tups:
                                 required_repository_tups.append( components_list )
                         for components_list in val:
                             try:
                                 only_if_compiling_contained_td = components_list[ 5 ]
                             except IndexError:
                                 only_if_compiling_contained_td = 'False'
                             # Skip listing a repository dependency if it is required only to compile a tool dependency
                             # defined for the dependent repository (see above comment).
                             if not asbool( only_if_compiling_contained_td ):
                                 if components_list not in required_repository_tups:
                                     required_repository_tups.append( components_list )
                 else:
                     # We have a single repository with no dependencies.
                     components_list = [ tool_shed_url, repository_name, repository_owner, changeset_revision ]
                     required_repository_tups.append( components_list )
             if required_repository_tups:
                 # The value of required_repository_tups is a list of tuples, so we need to encode it.
                 encoded_required_repository_tups = []
                 for required_repository_tup in required_repository_tups:
                     # Convert every item in required_repository_tup to a string.
                     required_repository_tup = [ str( item ) for item in required_repository_tup ]
                     encoded_required_repository_tups.append( encoding_util.encoding_sep.join( required_repository_tup ) )
                 encoded_required_repository_str = encoding_util.encoding_sep2.join( encoded_required_repository_tups )
                 encoded_required_repository_str = encoding_util.tool_shed_encode( encoded_required_repository_str )
                 if suc.is_tool_shed_client( self.app ):
                     # Handle secure / insecure Tool Shed URL protocol changes and port changes.
                     tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.app, tool_shed_url )
                 pathspec = [ 'repository', 'get_required_repo_info_dict' ]
                 url = common_util.url_join( tool_shed_url, pathspec=pathspec )
                 # Fix for handling 307 redirect not being handled nicely by urllib2.urlopen when the urllib2.Request has data provided
                 url = urllib2.urlopen( urllib2.Request( url ) ).geturl()
                 request = urllib2.Request( url, data=urllib.urlencode( dict( encoded_str=encoded_required_repository_str ) ) )
                 response = urllib2.urlopen( request ).read()
                 if response:
                     try:
                         required_repo_info_dict = json.loads( response )
                     except Exception as e:
                         log.exception( e )
                         return all_repo_info_dicts
                     required_repo_info_dicts = []
                     for k, v in required_repo_info_dict.items():
                         if k == 'repo_info_dicts':
                             encoded_dict_strings = required_repo_info_dict[ 'repo_info_dicts' ]
                             for encoded_dict_str in encoded_dict_strings:
                                 decoded_dict = encoding_util.tool_shed_decode( encoded_dict_str )
                                 required_repo_info_dicts.append( decoded_dict )
                         else:
                             if k not in all_required_repo_info_dict:
                                 all_required_repo_info_dict[ k ] = v
                             else:
                                 if v and not all_required_repo_info_dict[ k ]:
                                     all_required_repo_info_dict[ k ] = v
                         if required_repo_info_dicts:
                             for required_repo_info_dict in required_repo_info_dicts:
                                 # Each required_repo_info_dict has a single entry, and all_repo_info_dicts is a list
                                 # of dictionaries, each of which has a single entry.  We'll check keys here rather than
                                 # the entire dictionary because a dictionary entry in all_repo_info_dicts will include
                                 # lists of discovered repository dependencies, but these lists will be empty in the
                                 # required_repo_info_dict since dependency discovery has not yet been performed for these
                                 # dictionaries.
                                 required_repo_info_dict_key = required_repo_info_dict.keys()[ 0 ]
                                 all_repo_info_dicts_keys = [ d.keys()[ 0 ] for d in all_repo_info_dicts ]
                                 if required_repo_info_dict_key not in all_repo_info_dicts_keys:
                                     all_repo_info_dicts.append( required_repo_info_dict )
                     all_required_repo_info_dict[ 'all_repo_info_dicts' ] = all_repo_info_dicts
     return all_required_repo_info_dict
Esempio n. 57
0
 def is_deprecated_in_tool_shed(self):
     if self.tool_shed_status:
         return asbool(
             self.tool_shed_status.get('repository_deprecated', False))
     return False
Esempio n. 58
0
def paste_app_factory(global_conf, **kwargs):
    """
    Return a wsgi application serving the root object
    """
    kwargs = load_app_properties(kwds=kwargs)
    # Create the Galaxy application unless passed in
    if 'app' in kwargs:
        app = kwargs.pop('app')
        galaxy.app.app = app
    else:
        try:
            app = galaxy.app.UniverseApplication(global_conf=global_conf,
                                                 **kwargs)
            galaxy.app.app = app
        except Exception:
            import traceback
            traceback.print_exc()
            sys.exit(1)
    # Call the app's shutdown method when the interpreter exits; this cleanly stops
    # the various Galaxy application daemon threads.
    atexit.register(app.shutdown)
    # Create the universe WSGI application
    webapp = GalaxyWebApplication(app,
                                  session_cookie='galaxysession',
                                  name='galaxy')

    # CLIENTSIDE ROUTES
    # The following are routes that are handled completely on the clientside.
    # The following routes don't bootstrap any information, simply provide the
    # base analysis interface at which point the application takes over.

    webapp.add_client_route('/tours')
    webapp.add_client_route('/tours/{tour_id}')

    # STANDARD CONTROLLER ROUTES
    webapp.add_ui_controllers('galaxy.webapps.galaxy.controllers', app)
    # Force /history to go to view of current
    webapp.add_route('/history', controller='history', action='view')
    webapp.add_route('/history/view/{id}', controller='history', action='view')
    # Force /activate to go to the controller
    webapp.add_route('/activate', controller='user', action='activate')
    webapp.add_route('/login', controller='root', action='login')

    # These two routes handle our simple needs at the moment
    webapp.add_route('/async/{tool_id}/{data_id}/{data_secret}',
                     controller='async',
                     action='index',
                     tool_id=None,
                     data_id=None,
                     data_secret=None)
    webapp.add_route('/{controller}/{action}', action='index')
    webapp.add_route('/{action}', controller='root', action='index')

    # allow for subdirectories in extra_files_path
    webapp.add_route('/datasets/{dataset_id}/display/{filename:.+?}',
                     controller='dataset',
                     action='display',
                     dataset_id=None,
                     filename=None)
    webapp.add_route('/datasets/{dataset_id}/{action}/{filename}',
                     controller='dataset',
                     action='index',
                     dataset_id=None,
                     filename=None)
    webapp.add_route(
        '/display_application/{dataset_id}/{app_name}/{link_name}/{user_id}/{app_action}/{action_param}/{action_param_extra:.+?}',
        controller='dataset',
        action='display_application',
        dataset_id=None,
        user_id=None,
        app_name=None,
        link_name=None,
        app_action=None,
        action_param=None,
        action_param_extra=None)
    webapp.add_route('/u/{username}/d/{slug}/{filename}',
                     controller='dataset',
                     action='display_by_username_and_slug',
                     filename=None)
    webapp.add_route('/u/{username}/p/{slug}',
                     controller='page',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/{username}/h/{slug}',
                     controller='history',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/{username}/w/{slug}',
                     controller='workflow',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/{username}/w/{slug}/{format}',
                     controller='workflow',
                     action='display_by_username_and_slug')
    webapp.add_route('/u/{username}/v/{slug}',
                     controller='visualization',
                     action='display_by_username_and_slug')
    webapp.add_route('/search', controller='search', action='index')

    # TODO: Refactor above routes into external method to allow testing in
    # isolation as well.
    populate_api_routes(webapp, app)

    # ==== Done
    # Indicate that all configuration settings have been provided
    webapp.finalize_config()

    # Wrap the webapp in some useful middleware
    if kwargs.get('middleware', True):
        webapp = wrap_in_middleware(webapp, global_conf, **kwargs)
    if asbool(kwargs.get('static_enabled', True)):
        if process_is_uwsgi:
            log.error(
                "Static middleware is enabled in your configuration but this is a uwsgi process.  Refusing to wrap in static middleware."
            )
        else:
            webapp = wrap_in_static(
                webapp,
                global_conf,
                plugin_frameworks=[app.visualizations_registry],
                **kwargs)
    # Close any pooled database connections before forking
    try:
        galaxy.model.mapping.metadata.bind.dispose()
    except Exception:
        log.exception(
            "Unable to dispose of pooled galaxy model database connections.")
    try:
        # This model may not actually be bound.
        if galaxy.model.tool_shed_install.mapping.metadata.bind:
            galaxy.model.tool_shed_install.mapping.metadata.bind.dispose()
    except Exception:
        log.exception(
            "Unable to dispose of pooled toolshed install model database connections."
        )

    register_postfork_function(postfork_setup)

    for th in threading.enumerate():
        if th.is_alive():
            log.debug("Prior to webapp return, Galaxy thread %s is alive.", th)
    # Return
    return webapp
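The route declarations above follow the Routes-library style of path templates with {placeholder} segments. A minimal standalone sketch of the same connect/match idea, assuming the `routes` package is installed (illustrative only, not Galaxy's webapp object):

from routes import Mapper

mapper = Mapper()
# The same declaration style used by webapp.add_route() above.
mapper.connect('/history', controller='history', action='view')
mapper.connect('/u/{username}/p/{slug}', controller='page',
               action='display_by_username_and_slug')

print(mapper.match('/u/jane/p/my-analysis'))
# e.g. {'controller': 'page', 'action': 'display_by_username_and_slug',
#       'username': 'jane', 'slug': 'my-analysis'}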
Esempio n. 59
0
    def queue_job(self, job_wrapper):
        """Create job script and submit it to the DRM"""
        # prepare the job

        # external_runjob_script can be None, in which case it's not used.
        external_runjob_script = job_wrapper.get_destination_configuration(
            "drmaa_external_runjob_script", None)

        include_metadata = asbool(
            job_wrapper.job_destination.params.get("embed_metadata_in_job",
                                                   True))
        if not self.prepare_job(job_wrapper,
                                include_metadata=include_metadata):
            return

        # get configured job destination
        job_destination = job_wrapper.job_destination

        # wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
        galaxy_id_tag = job_wrapper.get_id_tag()

        job_name = self._job_name(job_wrapper)
        ajs = AsynchronousJobState(files_dir=job_wrapper.working_directory,
                                   job_wrapper=job_wrapper,
                                   job_name=job_name)

        # set up the drmaa job template
        jt = dict(remoteCommand=ajs.job_file,
                  jobName=ajs.job_name,
                  workingDirectory=job_wrapper.working_directory,
                  outputPath=":%s" % ajs.output_file,
                  errorPath=":%s" % ajs.error_file)

        # Avoid a jt.exitCodePath for now - it's only used when finishing.
        native_spec = job_destination.params.get('nativeSpecification', None)
        if native_spec is not None:
            jt['nativeSpecification'] = native_spec

        # fill in the DRM's job run template
        script = self.get_job_file(job_wrapper,
                                   exit_code_path=ajs.exit_code_file)
        try:
            self.write_executable_script(ajs.job_file, script)
        except Exception:
            job_wrapper.fail("failure preparing job script", exception=True)
            log.exception("(%s) failure writing job script" % galaxy_id_tag)
            return

        # job was deleted while we were preparing it
        if job_wrapper.get_state() == model.Job.states.DELETED:
            log.debug("(%s) Job deleted by user before it entered the queue" %
                      galaxy_id_tag)
            if job_wrapper.cleanup_job in ("always", "onsuccess"):
                job_wrapper.cleanup()
            return

        log.debug("(%s) submitting file %s", galaxy_id_tag, ajs.job_file)
        if native_spec:
            log.debug("(%s) native specification is: %s", galaxy_id_tag,
                      native_spec)

        # runJob will raise if there's a submit problem
        if external_runjob_script is None:
            # TODO: create a queue for retrying submission indefinitely
            # TODO: configurable max tries and sleep
            trynum = 0
            external_job_id = None
            fail_msg = None
            while external_job_id is None and trynum < 5:
                try:
                    external_job_id = self.ds.run_job(**jt)
                    break
                except (drmaa.InternalException,
                        drmaa.DeniedByDrmException) as e:
                    trynum += 1
                    log.warning(
                        '(%s) drmaa.Session.runJob() failed, will retry: %s',
                        galaxy_id_tag, e)
                    fail_msg = "Unable to run this job due to a cluster error, please retry it later"
                    time.sleep(5)
                except Exception:
                    log.exception(
                        '(%s) drmaa.Session.runJob() failed unconditionally',
                        galaxy_id_tag)
                    trynum = 5
            else:
                log.error("(%s) All attempts to submit job failed" %
                          galaxy_id_tag)
                if not fail_msg:
                    fail_msg = DEFAULT_JOB_PUT_FAILURE_MESSAGE
                job_wrapper.fail(fail_msg)
                return
        else:
            job_wrapper.change_ownership_for_run()
            # if user credentials are not available, use galaxy credentials (if permitted)
            allow_guests = asbool(
                job_wrapper.job_destination.params.get("allow_guests", False))
            pwent = job_wrapper.user_system_pwent
            if pwent is None:
                if not allow_guests:
                    fail_msg = "User %s is not mapped to any real user, and not permitted to start jobs." % job_wrapper.user
                    job_wrapper.fail(fail_msg)
                    return
                pwent = job_wrapper.galaxy_system_pwent
            log.debug('(%s) submitting with credentials: %s [uid: %s]' %
                      (galaxy_id_tag, pwent[0], pwent[2]))
            filename = self.store_jobtemplate(job_wrapper, jt)
            self.userid = pwent[2]
            external_job_id = self.external_runjob(external_runjob_script,
                                                   filename, pwent[2]).strip()
        log.info("(%s) queued as %s" % (galaxy_id_tag, external_job_id))

        # store runner information for tracking if Galaxy restarts
        job_wrapper.set_job_destination(job_destination, external_job_id)

        # Store DRM related state information for job
        ajs.job_id = external_job_id
        ajs.old_state = 'new'
        ajs.job_destination = job_destination

        # Add to our 'queue' of jobs to monitor
        self.monitor_queue.put(ajs)
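The submission loop above relies on Python's while/else: the else branch runs only when the loop exits without hitting break, i.e. when every attempt failed. A minimal standalone sketch of that retry pattern (toy names, not the DRMAA API):

import time

def submit_with_retries(submit, max_tries=5, delay=1):
    # Retry submit() a few times; fail only if every attempt raised.
    trynum = 0
    external_job_id = None
    while external_job_id is None and trynum < max_tries:
        try:
            external_job_id = submit()
            break                      # success: the else clause below is skipped
        except RuntimeError as e:
            trynum += 1
            print("submission failed (%s), retrying" % e)
            time.sleep(delay)
    else:
        # Runs only when the loop finished without break, i.e. all tries failed.
        raise RuntimeError("all %d submission attempts failed" % max_tries)
    return external_job_id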
Esempio n. 60
0
 def check_weberror(self):
     return (asbool(self.config["debug"])
             and asbool(self.config["use_interactive"]))
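Most of these snippets coerce string configuration values with asbool. A minimal stand-in covering the usual truthy spellings (the real helper in galaxy.util / PasteDeploy may accept additional values):

def asbool(value):
    # Minimal stand-in: treat the common string spellings as booleans.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', 'yes', 'on', 'y', 't', '1')

assert asbool('True') and asbool('1')
assert not asbool('false') and not asbool(0)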