Ejemplo n.º 1
0
 def apply_query_filter(self, trans, query, **kwd):
     """Restrict *query* to libraries the current user may access.

     Libraries with no LIBRARY_ACCESS permission row are public and always
     visible; restricted ones are visible only when one of the user's roles
     grants access.
     """
     role_ids = [role.id for role in trans.get_current_user_roles()]
     access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
     permissions = trans.model.LibraryPermissions
     # Any library carrying a LIBRARY_ACCESS row is restricted.
     restricted_ids = [
         row.library_id
         for row in trans.sa_session.query(permissions)
         .filter(permissions.table.c.action == access_action)
         .distinct()
     ]
     # Restricted libraries whose access role matches one of the user's.
     allowed_ids = [
         row.library_id
         for row in trans.sa_session.query(permissions).filter(
             and_(
                 permissions.table.c.action == access_action,
                 permissions.table.c.role_id.in_(role_ids),
             )
         )
     ]
     library_id = trans.model.Library.table.c.id
     if not trans.user:
         # Anonymous users see only public libraries.
         return query.filter(not_(library_id.in_(restricted_ids)))
     # Authenticated users additionally see restricted libraries their
     # roles unlock.
     return query.filter(
         or_(not_(library_id.in_(restricted_ids)),
             library_id.in_(allowed_ids)))
Ejemplo n.º 2
0
 def apply_query_filter(self, trans, query, **kwd):
     """Filter *query* down to the libraries visible to the current user."""
     user_role_ids = [r.id for r in trans.get_current_user_roles()]
     access = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
     lp_table = trans.model.LibraryPermissions.table
     # Libraries that carry any LIBRARY_ACCESS permission are restricted.
     restricted = trans.sa_session.query(trans.model.LibraryPermissions).filter(
         lp_table.c.action == access
     ).distinct()
     restricted_ids = [perm.library_id for perm in restricted]
     # Of those, the ones reachable through one of the user's roles.
     reachable = trans.sa_session.query(trans.model.LibraryPermissions).filter(
         and_(lp_table.c.action == access,
              lp_table.c.role_id.in_(user_role_ids))
     )
     reachable_ids = [perm.library_id for perm in reachable]
     library_id = trans.model.Library.table.c.id
     if not trans.user:
         # Anonymous: only public libraries, i.e. those without a
         # LIBRARY_ACCESS row.
         return query.filter(not_(library_id.in_(restricted_ids)))
     # Logged in: public libraries plus restricted ones the user's roles
     # unlock.
     return query.filter(
         or_(not_(library_id.in_(restricted_ids)),
             library_id.in_(reachable_ids))
     )
Ejemplo n.º 3
0
 def index( self, trans, deleted='False', **kwd ):
     """
     GET /api/libraries
     GET /api/libraries/deleted
     Displays a collection (list) of libraries.
     """
     log.debug( "LibrariesController.index: enter" )
     show_deleted = util.string_as_bool( deleted )
     query = trans.sa_session.query( trans.app.model.Library )
     if show_deleted:
         route = 'deleted_library'
         query = query.filter( trans.app.model.Library.table.c.deleted == True )
     else:
         route = 'library'
         query = query.filter( trans.app.model.Library.table.c.deleted == False )
     role_ids = [ role.id for role in trans.get_current_user_roles() ]
     access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
     lp = trans.model.LibraryPermissions
     # Any library with a LIBRARY_ACCESS permission row is restricted.
     restricted_ids = [ p.library_id
                        for p in trans.sa_session.query( lp )
                                                 .filter( lp.table.c.action == access_action )
                                                 .distinct() ]
     # Restricted libraries one of the user's roles can open.
     allowed_ids = [ p.library_id
                     for p in trans.sa_session.query( lp )
                                              .filter( and_( lp.table.c.action == access_action,
                                                             lp.table.c.role_id.in_( role_ids ) ) ) ]
     library_id_col = trans.model.Library.table.c.id
     # Visible == public, or restricted-but-accessible.
     query = query.filter( or_( not_( library_id_col.in_( restricted_ids ) ),
                                library_id_col.in_( allowed_ids ) ) )
     rval = []
     for library in query:
         item = library.get_api_value()
         item['url'] = url_for( route, id=trans.security.encode_id( library.id ) )
         item['id'] = trans.security.encode_id( item['id'] )
         rval.append( item )
     return rval
Ejemplo n.º 4
0
    def list( self, trans, deleted=False ):
        """
        Return a list of libraries from the DB.

        :param  deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted``
        :type   deleted: boolean (optional)

        :returns: query that will emit all accessible libraries
        :rtype: sqlalchemy query
        """
        query = trans.sa_session.query( trans.app.model.Library )
        if trans.user_is_admin():
            # Admins may see everything; honour the deleted flag only when
            # it was explicitly supplied.
            if deleted is not None:
                if deleted:
                    query = query.filter( trans.app.model.Library.table.c.deleted == True )
                else:
                    query = query.filter( trans.app.model.Library.table.c.deleted == False )
            return query
        # Nonadmins can't see deleted libraries.
        # NOTE(review): no deleted filter is actually applied on this path;
        # this matches the original behaviour -- confirm it is intended.
        role_ids = [ r.id for r in trans.get_current_user_roles() ]
        access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
        lp = trans.model.LibraryPermissions
        # Libraries carrying a LIBRARY_ACCESS permission are restricted.
        restricted_ids = [ perm.library_id
                           for perm in ( trans.sa_session.query( lp )
                                         .filter( lp.table.c.action == access_action )
                                         .distinct() ) ]
        # Restricted libraries reachable through one of the user's roles.
        permitted_ids = [ perm.library_id
                          for perm in ( trans.sa_session.query( lp )
                                        .filter( and_( lp.table.c.action == access_action,
                                                       lp.table.c.role_id.in_( role_ids ) ) ) ) ]
        lib_id = trans.model.Library.table.c.id
        return query.filter( or_( not_( lib_id.in_( restricted_ids ) ),
                                  lib_id.in_( permitted_ids ) ) )
Ejemplo n.º 5
0
    def index( self, trans, **kwd ):
        """
        index( self, trans, **kwd )
        * GET /api/libraries:
            Returns a list of summary data for all libraries.

        :param  deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted``
        :type   deleted: boolean (optional)

        :returns:   list of dictionaries containing library information
        :rtype:     list

        .. seealso:: :attr:`galaxy.model.Library.dict_collection_visible_keys`

        """
        is_admin = trans.user_is_admin()
        query = trans.sa_session.query( trans.app.model.Library )
        deleted = kwd.get( 'deleted', 'missing' )
        try:
            if not is_admin:
                # non-admins can't see deleted libraries
                deleted = False
            else:
                deleted = util.asbool( deleted )
            if deleted:
                query = query.filter( trans.app.model.Library.table.c.deleted == True )
            else:
                query = query.filter( trans.app.model.Library.table.c.deleted == False )
        except ValueError:
            # given value wasn't true/false but the user is admin so we don't filter on this parameter at all
            pass

        if not is_admin:
            # non-admins can see only allowed and public libraries
            current_user_role_ids = [ role.id for role in trans.get_current_user_roles() ]
            library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
            restricted_library_ids = [ lp.library_id for lp in ( trans.sa_session.query( trans.model.LibraryPermissions )
                                                                 .filter( trans.model.LibraryPermissions.table.c.action == library_access_action )
                                                                 .distinct() ) ]
            accessible_restricted_library_ids = [ lp.library_id for lp in ( trans.sa_session.query( trans.model.LibraryPermissions )
                                                  .filter( and_( trans.model.LibraryPermissions.table.c.action == library_access_action,
                                                                 trans.model.LibraryPermissions.table.c.role_id.in_( current_user_role_ids ) ) ) ) ]
            query = query.filter( or_( not_( trans.model.Library.table.c.id.in_( restricted_library_ids ) ), trans.model.Library.table.c.id.in_( accessible_restricted_library_ids ) ) )
        # Hoisted out of the loop: the user's roles and admin status are
        # loop-invariant, so fetch them once instead of per library.
        current_user_roles = trans.get_current_user_roles()
        libraries = []
        for library in query:
            item = library.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'root_folder_id': trans.security.encode_id } )
            if trans.app.security_agent.library_is_public( library, contents=False ):
                item[ 'public' ] = True
            if is_admin:
                # Admins can do everything.
                item['can_user_add'] = True
                item['can_user_modify'] = True
                item['can_user_manage'] = True
            else:
                item['can_user_add'] = trans.app.security_agent.can_add_library_item( current_user_roles, library )
                item['can_user_modify'] = trans.app.security_agent.can_modify_library_item( current_user_roles, library )
                item['can_user_manage'] = trans.app.security_agent.can_manage_library_item( current_user_roles, library )
            libraries.append( item )
        return libraries
Ejemplo n.º 6
0
 def index( self, trans, **kwd ):
     """
     GET /api/repository_revisions
     Displays a collection (list) of repository revisions.
     """
     # Example URL: http://localhost:9009/api/repository_revisions
     repository_metadata_dicts = []
     # Build up an anded clause list of filters.
     clause_list = []
     # All of these optional filters are boolean columns handled the same
     # way, so process them uniformly instead of one copy-pasted block each.
     boolean_filters = ( 'downloadable',
                         'malicious',
                         'tools_functionally_correct',
                         'missing_test_components',
                         'do_not_test',
                         'includes_tools',
                         'test_install_error' )
     columns = trans.model.RepositoryMetadata.table.c
     for filter_name in boolean_filters:
         value = kwd.get( filter_name, None )
         if value is not None:
             clause_list.append( columns[ filter_name ] == util.asbool( value ) )
     # Filter by skip_tool_test if received: membership (or not) in the
     # skip_tool_test table rather than a simple column comparison.
     skip_tool_test = kwd.get( 'skip_tool_test', None )
     if skip_tool_test is not None:
         skip_tool_test = util.asbool( skip_tool_test )
         skipped_metadata_ids_subquery = select( [ trans.app.model.SkipToolTest.table.c.repository_metadata_id ] )
         if skip_tool_test:
             clause_list.append( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) )
         else:
             clause_list.append( not_( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) ) )
     for repository_metadata in trans.sa_session.query( trans.app.model.RepositoryMetadata ) \
                                                .filter( and_( *clause_list ) ) \
                                                .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id.desc() ):
         repository_metadata_dict = repository_metadata.to_dict( view='collection',
                                                                 value_mapper=self.__get_value_mapper( trans ) )
         repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',
                                                          action='show',
                                                          id=trans.security.encode_id( repository_metadata.id ) )
         repository_metadata_dicts.append( repository_metadata_dict )
     return repository_metadata_dicts
Ejemplo n.º 7
0
 def filter(self, trans, user, query, column_filter):
     """Grid filter: 'Unfinished' keeps only jobs still in flight."""
     if column_filter != 'Unfinished':
         return query
     # A job is finished once it reaches OK, ERROR or DELETED.
     finished = or_(
         model.Job.table.c.state == model.Job.states.OK,
         model.Job.table.c.state == model.Job.states.ERROR,
         model.Job.table.c.state == model.Job.states.DELETED)
     return query.filter(not_(finished))
def deprecate_repositories( app, cutoff_time, days=14, info_only=False, verbose=False ):
    # This method will get a list of repositories that were created on or before cutoff_time, but have never
    # had any metadata records associated with them. Then it will iterate through that list and deprecate the
    # repositories, sending an email to each repository owner.
    dataset_count = 0
    disk_space = 0
    start = time.time()
    repository_ids_to_not_check = []
    # Get a unique list of repository ids from the repository_metadata table. Any repository ID found in this table is not
    # empty, and will not be checked.
    metadata_records = sa.select( [ distinct( app.model.RepositoryMetadata.table.c.repository_id ) ],
                                  from_obj=app.model.RepositoryMetadata.table ) \
                         .execute()
    for metadata_record in metadata_records:
        repository_ids_to_not_check.append( metadata_record.repository_id )
    # Get the repositories that are A) not present in the above list, and b) older than the specified time.
    # This will yield a list of repositories that have been created more than n days ago, but never populated.
    repository_query = sa.select( [ app.model.Repository.table.c.id ],
                                  whereclause = and_( app.model.Repository.table.c.create_time < cutoff_time,
                                                      app.model.Repository.table.c.deprecated == False,
                                                      app.model.Repository.table.c.deleted == False,
                                                      not_( app.model.Repository.table.c.id.in_( repository_ids_to_not_check ) ) ),
                                  from_obj = [ app.model.Repository.table ] )
    query_result = repository_query.execute()
    repositories = []
    repositories_by_owner = {}
    repository_ids = [ row.id for row in query_result ]
    # Iterate through the list of repository ids for empty repositories and deprecate them unless info_only is set.
    for repository_id in repository_ids:
        repository = app.sa_session.query( app.model.Repository ) \
                               .filter( app.model.Repository.table.c.id == repository_id ) \
                               .one()
        owner = repository.user
        if info_only:
            print '# Repository %s owned by %s would have been deprecated, but info_only was set.' % ( repository.name, repository.user.username )
        else:
            if verbose:
                print '# Deprecating repository %s owned by %s.' % ( repository.name, owner.username )
            if owner.username not in repositories_by_owner:
                repositories_by_owner[ owner.username ] = dict( owner=owner, repositories=[] )
            repositories_by_owner[ owner.username ][ 'repositories' ].append( repository )
            repositories.append( repository )
    # Send an email to each repository owner, listing the repositories that were deprecated.
    for repository_owner in repositories_by_owner:
        for repository in repositories_by_owner[ repository_owner ][ 'repositories' ]:
            repository.deprecated = True
            app.sa_session.add( repository )
            app.sa_session.flush()
        owner = repositories_by_owner[ repository_owner ][ 'owner' ]
        send_mail_to_owner( app, repository.name, owner.username, owner.email, repositories_by_owner[ repository_owner ][ 'repositories' ], days )
    stop = time.time()
    print '# Deprecated %d repositories.' % len( repositories )
    print "# Elapsed time: ", stop - start
    print "####################################################################################"
Ejemplo n.º 9
0
def deprecate_repositories( app, cutoff_time, days=14, info_only=False, verbose=False ):
    """
    Deprecate repositories created on or before ``cutoff_time`` that have
    never had any metadata records associated with them, then send an email
    to each affected repository owner.

    :param app: application object exposing ``model`` and ``sa_session``
    :param cutoff_time: creation-time cutoff; older repositories are candidates
    :param days: number of days, forwarded to the owner notification email
    :param info_only: if True, only print what would be deprecated
    :param verbose: if True, print each repository as it is processed
    """
    # NOTE(review): these two counters are never updated or read below.
    dataset_count = 0
    disk_space = 0
    start = time.time()
    repository_ids_to_not_check = []
    # Get a unique list of repository ids from the repository_metadata table. Any repository ID found in this table is not
    # empty, and will not be checked.
    metadata_records = sa.select( [ distinct( app.model.RepositoryMetadata.table.c.repository_id ) ],
                                  from_obj=app.model.RepositoryMetadata.table ) \
                         .execute()
    for metadata_record in metadata_records:
        repository_ids_to_not_check.append( metadata_record.repository_id )
    # Get the repositories that are A) not present in the above list, and b) older than the specified time.
    # This will yield a list of repositories that have been created more than n days ago, but never populated.
    repository_query = sa.select( [ app.model.Repository.table.c.id ],
                                  whereclause = and_( app.model.Repository.table.c.create_time < cutoff_time,
                                                      app.model.Repository.table.c.deprecated == False,
                                                      app.model.Repository.table.c.deleted == False,
                                                      not_( app.model.Repository.table.c.id.in_( repository_ids_to_not_check ) ) ),
                                  from_obj = [ app.model.Repository.table ] )
    query_result = repository_query.execute()
    repositories = []
    repositories_by_owner = {}
    repository_ids = [ row.id for row in query_result ]
    # Iterate through the list of repository ids for empty repositories and deprecate them unless info_only is set.
    for repository_id in repository_ids:
        repository = app.sa_session.query( app.model.Repository ) \
                               .filter( app.model.Repository.table.c.id == repository_id ) \
                               .one()
        owner = repository.user
        if info_only:
            print '# Repository %s owned by %s would have been deprecated, but info_only was set.' % ( repository.name, repository.user.username )
        else:
            if verbose:
                print '# Deprecating repository %s owned by %s.' % ( repository.name, owner.username )
            # Group repositories by owner so one summary email per owner can
            # be sent below.
            if owner.username not in repositories_by_owner:
                repositories_by_owner[ owner.username ] = dict( owner=owner, repositories=[] )
            repositories_by_owner[ owner.username ][ 'repositories' ].append( repository )
            repositories.append( repository )
    # Send an email to each repository owner, listing the repositories that were deprecated.
    for repository_owner in repositories_by_owner:
        for repository in repositories_by_owner[ repository_owner ][ 'repositories' ]:
            repository.deprecated = True
            app.sa_session.add( repository )
            app.sa_session.flush()
        owner = repositories_by_owner[ repository_owner ][ 'owner' ]
        # NOTE(review): ``repository`` is the last item of the inner loop;
        # confirm the mail helper really wants that single name here.
        send_mail_to_owner( app, repository.name, owner.username, owner.email, repositories_by_owner[ repository_owner ][ 'repositories' ], days )
    stop = time.time()
    print '# Deprecated %d repositories.' % len( repositories )
    print "# Elapsed time: ", stop - start
    print "####################################################################################"
Ejemplo n.º 10
0
    def list(self, trans, deleted=False):
        """
        Return a list of libraries from the DB.

        :param  deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted``
        :type   deleted: boolean (optional)
        """
        query = trans.sa_session.query(trans.app.model.Library)

        if trans.user_is_admin():
            # Admins may see everything; filter on the deleted flag only
            # when it was explicitly supplied.
            if deleted is None:
                pass
            elif deleted:
                query = query.filter(
                    trans.app.model.Library.table.c.deleted == True)
            else:
                query = query.filter(
                    trans.app.model.Library.table.c.deleted == False)
            return query

        # Nonadmins can't see deleted libraries.
        lp_model = trans.model.LibraryPermissions
        lp_cols = lp_model.table.c
        access = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
        role_ids = [role.id for role in trans.get_current_user_roles()]
        # Libraries carrying a LIBRARY_ACCESS permission are restricted.
        restricted_ids = [
            perm.library_id
            for perm in trans.sa_session.query(lp_model)
                                        .filter(lp_cols.action == access)
                                        .distinct()
        ]
        # Restricted libraries reachable through one of the user's roles.
        permitted_ids = [
            perm.library_id
            for perm in trans.sa_session.query(lp_model)
                                        .filter(and_(lp_cols.action == access,
                                                     lp_cols.role_id.in_(role_ids)))
        ]
        lib_id = trans.model.Library.table.c.id
        return query.filter(
            or_(not_(lib_id.in_(restricted_ids)), lib_id.in_(permitted_ids)))
Ejemplo n.º 11
0
    def index( self, trans, deleted='False', **kwd ):
        """
        index( self, trans, deleted='False', **kwd )
        * GET /api/libraries:
            returns a list of summary data for libraries
        * GET /api/libraries/deleted:
            returns a list of summary data for deleted libraries

        :type   deleted: boolean
        :param  deleted: if True, show only deleted libraries, if False, non-deleted

        :rtype:     list
        :returns:   list of dictionaries containing library information
        .. seealso:: :attr:`galaxy.model.Library.dict_collection_visible_keys`
        """
        show_deleted = util.string_as_bool( deleted )
        query = trans.sa_session.query( trans.app.model.Library )
        if show_deleted:
            route = 'deleted_library'
            query = query.filter( trans.app.model.Library.table.c.deleted == True )
        else:
            route = 'library'
            query = query.filter( trans.app.model.Library.table.c.deleted == False )
        lp = trans.model.LibraryPermissions
        access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
        user_role_ids = [ role.id for role in trans.get_current_user_roles() ]
        # Restricted libraries: any library with a LIBRARY_ACCESS row.
        restricted_ids = [ perm.library_id
                           for perm in trans.sa_session.query( lp )
                                                       .filter( lp.table.c.action == access_action )
                                                       .distinct() ]
        # Restricted libraries the user's roles grant access to.
        permitted_ids = [ perm.library_id
                          for perm in trans.sa_session.query( lp )
                                                      .filter( and_( lp.table.c.action == access_action,
                                                                     lp.table.c.role_id.in_( user_role_ids ) ) ) ]
        lib_id = trans.model.Library.table.c.id
        query = query.filter( or_( not_( lib_id.in_( restricted_ids ) ),
                                   lib_id.in_( permitted_ids ) ) )
        rval = []
        for library in query:
            item = library.to_dict()
            item['url'] = url_for( route, id=trans.security.encode_id( library.id ) )
            item['id'] = trans.security.encode_id( item['id'] )
            rval.append( item )
        return rval
Ejemplo n.º 12
0
 def index(self, trans, deleted='False', **kwd):
     """
     GET /api/libraries
     GET /api/libraries/deleted
     Displays a collection (list) of libraries.
     """
     log.debug("LibrariesController.index: enter")
     want_deleted = util.string_as_bool(deleted)
     route = 'deleted_library' if want_deleted else 'library'
     query = trans.sa_session.query(trans.app.model.Library).filter(
         trans.app.model.Library.table.c.deleted == want_deleted)
     role_ids = [role.id for role in trans.get_current_user_roles()]
     access = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
     perm_model = trans.model.LibraryPermissions
     # A library is restricted when any LIBRARY_ACCESS permission exists.
     restricted = [p.library_id
                   for p in trans.sa_session.query(perm_model)
                                            .filter(perm_model.table.c.action == access)
                                            .distinct()]
     # Restricted libraries unlocked by one of the user's roles.
     unlocked = [p.library_id
                 for p in trans.sa_session.query(perm_model)
                                          .filter(and_(perm_model.table.c.action == access,
                                                       perm_model.table.c.role_id.in_(role_ids)))]
     lib_id = trans.model.Library.table.c.id
     query = query.filter(or_(not_(lib_id.in_(restricted)),
                              lib_id.in_(unlocked)))
     rval = []
     for library in query:
         item = library.get_api_value()
         item['url'] = url_for(route,
                               id=trans.security.encode_id(library.id))
         item['id'] = trans.security.encode_id(item['id'])
         rval.append(item)
     return rval
Ejemplo n.º 13
0
 def filter( self, trans, user, query, column_filter ):
     """Restrict *query* to unfinished jobs when filtering on 'Unfinished'."""
     if column_filter == 'Unfinished':
         # Unfinished means not yet OK, ERROR or DELETED.
         state = model.Job.table.c.state
         terminal = ( model.Job.states.OK, model.Job.states.ERROR, model.Job.states.DELETED )
         clauses = [ state == s for s in terminal ]
         return query.filter( not_( or_( *clauses ) ) )
     return query
Ejemplo n.º 14
0
 def index(self, trans, **kwd):
     """
     GET /api/repository_revisions
     Displays a collection (list) of repository revisions.

     Supported boolean filter parameters (all optional): downloadable,
     malicious, tools_functionally_correct, missing_test_components,
     do_not_test, includes_tools, test_install_error, skip_tool_test.
     Returns a list of dicts on success, or an error message string with
     HTTP status 500 on failure.
     """
     # Example URL: http://localhost:9009/api/repository_revisions
     repository_metadata_dicts = []
     # Build up an anded clause list of filters.  Each simple boolean filter
     # follows the same pattern (compare a RepositoryMetadata column against
     # the coerced request parameter), so build them in one loop instead of
     # eight copy-pasted stanzas.  Iteration order matches the original so
     # the generated SQL clause order is unchanged.
     clause_list = []
     flag_columns = ('downloadable', 'malicious', 'tools_functionally_correct',
                     'missing_test_components', 'do_not_test', 'includes_tools',
                     'test_install_error')
     for column_name in flag_columns:
         value = kwd.get(column_name, None)
         if value is not None:
             column = getattr(trans.model.RepositoryMetadata.table.c, column_name)
             clause_list.append(column == util.string_as_bool(value))
     # skip_tool_test is different: membership is determined by a subquery
     # against the skip_tool_test table rather than a column comparison.
     skip_tool_test = kwd.get('skip_tool_test', None)
     if skip_tool_test is not None:
         skip_tool_test = util.string_as_bool(skip_tool_test)
         skipped_metadata_ids_subquery = select(
             [trans.app.model.SkipToolTest.table.c.repository_metadata_id])
         if skip_tool_test:
             clause_list.append(
                 trans.model.RepositoryMetadata.id.in_(
                     skipped_metadata_ids_subquery))
         else:
             clause_list.append(
                 not_(
                     trans.model.RepositoryMetadata.id.in_(
                         skipped_metadata_ids_subquery)))
     # Generate and execute the query.
     try:
         query = trans.sa_session.query( trans.app.model.RepositoryMetadata ) \
                                 .filter( and_( *clause_list ) ) \
                                 .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id ) \
                                 .all()
         for repository_metadata in query:
             repository_metadata_dict = repository_metadata.get_api_value(
                 view='collection',
                 value_mapper=default_value_mapper(trans,
                                                   repository_metadata))
             repository_metadata_dict['url'] = web.url_for(
                 controller='repository_revisions',
                 action='show',
                 id=trans.security.encode_id(repository_metadata.id))
             repository_metadata_dicts.append(repository_metadata_dict)
         return repository_metadata_dicts
     except Exception as e:
         # 'except ... as' works on Python 2.6+ and Python 3, unlike the
         # Python-2-only 'except Exception, e' spelling.
         message = "Error in the Tool Shed repository_revisions API in index: " + str(
             e)
         log.error(message, exc_info=True)
         trans.response.status = 500
         return message
Ejemplo n.º 15
0
    def index(self, trans, **kwd):
        """
        index( self, trans, **kwd )
        * GET /api/libraries:
            Returns a list of summary data for all libraries.

        :param  deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted``
        :type   deleted: boolean (optional)

        :returns:   list of dictionaries containing library information
        :rtype:     list

        .. seealso:: :attr:`galaxy.model.Library.dict_collection_visible_keys`

        """
        query = trans.sa_session.query(trans.app.model.Library)
        deleted = kwd.get('deleted', 'missing')
        try:
            if not trans.user_is_admin():
                # non-admins can't see deleted libraries
                deleted = False
            else:
                deleted = util.asbool(deleted)
            # '==' (not 'is') is required below: SQLAlchemy overloads '==' on
            # columns to build the SQL comparison clause.
            if deleted:
                query = query.filter(
                    trans.app.model.Library.table.c.deleted == True)
            else:
                query = query.filter(
                    trans.app.model.Library.table.c.deleted == False)
        except ValueError:
            # given value wasn't true/false but the user is admin so we don't filter on this parameter at all
            pass

        # A library is "restricted" iff it has at least one LIBRARY_ACCESS
        # permission row; public libraries have none.  A restricted library is
        # visible to this user iff one of those rows names one of the user's
        # roles.
        current_user_role_ids = [
            role.id for role in trans.get_current_user_roles()
        ]
        library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
        restricted_library_ids = [
            lp.library_id for lp in (
                trans.sa_session.query(trans.model.LibraryPermissions).filter(
                    trans.model.LibraryPermissions.table.c.action ==
                    library_access_action).distinct())
        ]
        accessible_restricted_library_ids = [
            lp.library_id for lp in (
                trans.sa_session.query(trans.model.LibraryPermissions).filter(
                    and_(
                        trans.model.LibraryPermissions.table.c.action ==
                        library_access_action,
                        trans.model.LibraryPermissions.table.c.role_id.in_(
                            current_user_role_ids))))
        ]
        # Keep public libraries plus restricted ones the user can access.
        query = query.filter(
            or_(
                not_(trans.model.Library.table.c.id.in_(
                    restricted_library_ids)),
                trans.model.Library.table.c.id.in_(
                    accessible_restricted_library_ids)))
        # Hoisted out of the loop below: the admin flag and the user's roles
        # do not change per library, so compute them once instead of on every
        # iteration.
        is_admin = trans.user_is_admin()
        current_user_roles = trans.get_current_user_roles()
        libraries = []
        for library in query:
            item = library.to_dict(view='element',
                                   value_mapper={
                                       'id': trans.security.encode_id,
                                       'root_folder_id':
                                       trans.security.encode_id
                                   })
            if trans.app.security_agent.library_is_public(library,
                                                          contents=False):
                item['public'] = True
            if not is_admin:
                item['can_user_add'] = trans.app.security_agent.can_add_library_item(
                    current_user_roles, library)
                item['can_user_modify'] = trans.app.security_agent.can_modify_library_item(
                    current_user_roles, library)
                item['can_user_manage'] = trans.app.security_agent.can_manage_library_item(
                    current_user_roles, library)
            else:
                # Admins implicitly hold every library permission.
                item['can_user_add'] = True
                item['can_user_modify'] = True
                item['can_user_manage'] = True
            libraries.append(item)
        return libraries
 def index(self, trans, **kwd):
     """
     GET /api/repository_revisions
     Displays a collection (list) of repository revisions.

     Supported boolean filter parameters (all optional): downloadable,
     malicious, tools_functionally_correct, missing_test_components,
     do_not_test, includes_tools, test_install_error, skip_tool_test.
     Returns a list of dicts ordered by repository id, newest first.
     """
     # Example URL: http://localhost:9009/api/repository_revisions
     repository_metadata_dicts = []
     # Build up an anded clause list of filters.  Each simple boolean filter
     # compares a RepositoryMetadata column against the coerced request
     # parameter, so build them in one loop instead of eight copy-pasted
     # stanzas.  Iteration order matches the original so the generated SQL
     # clause order is unchanged.
     clause_list = []
     flag_columns = ('downloadable', 'malicious', 'tools_functionally_correct',
                     'missing_test_components', 'do_not_test', 'includes_tools',
                     'test_install_error')
     for column_name in flag_columns:
         value = kwd.get(column_name, None)
         if value is not None:
             column = getattr(trans.model.RepositoryMetadata.table.c, column_name)
             clause_list.append(column == util.asbool(value))
     # skip_tool_test is different: membership is determined by a subquery
     # against the skip_tool_test table rather than a column comparison.
     skip_tool_test = kwd.get('skip_tool_test', None)
     if skip_tool_test is not None:
         skip_tool_test = util.asbool(skip_tool_test)
         skipped_metadata_ids_subquery = select(
             [trans.app.model.SkipToolTest.table.c.repository_metadata_id])
         if skip_tool_test:
             clause_list.append(
                 trans.model.RepositoryMetadata.id.in_(
                     skipped_metadata_ids_subquery))
         else:
             clause_list.append(
                 not_(
                     trans.model.RepositoryMetadata.id.in_(
                         skipped_metadata_ids_subquery)))
     for repository_metadata in trans.sa_session.query( trans.app.model.RepositoryMetadata ) \
                                                .filter( and_( *clause_list ) ) \
                                                .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id.desc() ):
         repository_metadata_dict = repository_metadata.to_dict(
             view='collection', value_mapper=self.__get_value_mapper(trans))
         repository_metadata_dict['url'] = web.url_for(
             controller='repository_revisions',
             action='show',
             id=trans.security.encode_id(repository_metadata.id))
         repository_metadata_dicts.append(repository_metadata_dict)
     return repository_metadata_dicts
def validate_repositories( app, info_only=False, verbosity=1 ):
    """
    Inspect records in the repository_metadata table that are associated with repositories of type TOOL_DEPENDENCY_DEFINITION
    to ensure they are valid and set the repository_metadata.do_not_test column value to True if the metadata is invalid.
    Each repository's metadata should look something like:
    "{"tool_dependencies": 
        {"libpng/1.2.5": {"name": "libpng", 
                          "readme": "README content", 
                          "type": "package", 
                          "version": "1.2.5"}}}"
    or:
    "{"repository_dependencies": 
        {"description": null, 
         "repository_dependencies": 
             [["http://localhost:9009", "package_libpng_1_2", "iuc", "5788512d4c0a", "True", "False"]]}, 
         "tool_dependencies": 
             {"libgd/2.1.0": 
                 {"name": "libgd", "readme": "text"}, 
              "libpng/1.2.5": 
                 {"name": "libpng", "type": "package", "version": "1.2.5"}}}"
    """
    invalid_metadata = 0
    records_checked = 0
    skip_metadata_ids = []
    start = time.time()
    valid_metadata = 0
    # Restrict testing to repositories of type TOOL_DEPENDENCY_DEFINITION
    tool_dependency_defintion_repository_ids = []
    for repository in app.sa_session.query( app.model.Repository ) \
                                    .filter( and_( app.model.Repository.table.c.deleted == False,
                                                   app.model.Repository.table.c.type == TOOL_DEPENDENCY_DEFINITION ) ):
        tool_dependency_defintion_repository_ids.append( repository.id )
    # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
    skip_metadata_ids = select( [ app.model.SkipToolTest.table.c.repository_metadata_id ] )
    # Get the list of metadata records to check, restricting it to records that have not been flagged do_not_test.
    for repository_metadata in \
        app.sa_session.query( app.model.RepositoryMetadata ) \
                      .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
                                     app.model.RepositoryMetadata.table.c.do_not_test == False,
                                     app.model.RepositoryMetadata.table.c.repository_id.in_( tool_dependency_defintion_repository_ids ),
                                     not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
        records_checked += 1
        # Check the next repository revision.
        changeset_revision = str( repository_metadata.changeset_revision )
        name = repository.name
        owner = repository.user.username
        metadata = repository_metadata.metadata
        repository = repository_metadata.repository
        if verbosity >= 1:
            print '# -------------------------------------------------------------------------------------------'
            print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner )
        if metadata:
            # Valid metadata will undoubtedly have a tool_dependencies entry or  repository_dependencies entry.
            repository_dependencies = metadata.get( 'repository_dependencies', None )
            tool_dependencies = metadata.get( 'tool_dependencies', None )
            if repository_dependencies or tool_dependencies:
                print 'Revision %s of %s owned by %s has valid metadata.' % ( changeset_revision, name, owner )
                valid_metadata += 1
            else:
                if verbosity >= 1:
                    print 'Revision %s of %s owned by %s has invalid metadata.' % ( changeset_revision, name, owner )
                invalid_metadata += 1
            if not info_only:
                # Create the tool_test_results_dict dictionary, using the dictionary from the previous test run if available.
                if repository_metadata.tool_test_results is not None:
                    # We'll listify the column value in case it uses the old approach of storing the results of only a single test run.
                    tool_test_results_dicts = listify( repository_metadata.tool_test_results )
                else:
                    tool_test_results_dicts = []
                if tool_test_results_dicts:
                    # Inspect the tool_test_results_dict for the last test run in case it contains only a test_environment
                    # entry.  This will occur with multiple runs of this script without running the associated
                    # install_and_test_tool_sed_repositories.sh script which will further populate the tool_test_results_dict.
                    tool_test_results_dict = tool_test_results_dicts[ 0 ]
                    if len( tool_test_results_dict ) <= 1:
                        # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
                        # a test_environment entry.  If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
                        # since it will be re-inserted later.
                        tool_test_results_dict = tool_test_results_dicts.pop( 0 )
                    elif len( tool_test_results_dict ) == 2 and \
                        'test_environment' in tool_test_results_dict and 'missing_test_components' in tool_test_results_dict:
                        # We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components".
                        # In this case, some tools are missing tests components while others are not.
                        tool_test_results_dict = tool_test_results_dicts.pop( 0 )
                    else:
                        # The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
                        tool_test_results_dict = {}
                else:
                    # Create a new dictionary for the most recent test run.
                    tool_test_results_dict = {}
                # Initialize the tool_test_results_dict dictionary with the information about the current test environment.
                test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
                test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app )
                test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
                test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
                tool_test_results_dict[ 'test_environment' ] = test_environment_dict
                # Store only the configured number of test runs.
                num_tool_test_results_saved = int( app.config.num_tool_test_results_saved )
                if len( tool_test_results_dicts ) >= num_tool_test_results_saved:
                    test_results_index = num_tool_test_results_saved - 1
                    new_tool_test_results_dicts = tool_test_results_dicts[ :test_results_index ]
                else:
                    new_tool_test_results_dicts = [ d for d in tool_test_results_dicts ]
                # Insert the new element into the first position in the list.
                new_tool_test_results_dicts.insert( 0, tool_test_results_dict )
                repository_metadata.tool_test_results = new_tool_test_results_dicts
                app.sa_session.add( repository_metadata )
                app.sa_session.flush()
    stop = time.time()
    print '# -------------------------------------------------------------------------------------------'
    print '# Checked %d repository revisions.' % records_checked
    print '# %d revisions found with valid tool dependency definition metadata.' % valid_metadata
    print '# %d revisions found with invalid tool dependency definition metadata.' % invalid_metadata
    if info_only:
        print '# Database not updated with any information from this run.'
    print "# Elapsed time: ", stop - start
    print "#############################################################################"
Ejemplo n.º 18
0
 def index( self, trans, **kwd ):
     """
     GET /api/repository_revisions
     Displays a collection (list) of repository revisions.
     """
     # Example URL: http://localhost:9009/api/repository_revisions
     # Every simple boolean filter follows the same shape: if the request
     # supplied the parameter, compare the matching RepositoryMetadata column
     # against the coerced value.  Collect them in request order.
     filter_clauses = []
     metadata_columns = trans.model.RepositoryMetadata.table.c
     for flag_name in ( 'downloadable',
                        'malicious',
                        'tools_functionally_correct',
                        'missing_test_components',
                        'do_not_test',
                        'includes_tools',
                        'test_install_error' ):
         raw_value = kwd.get( flag_name, None )
         if raw_value is not None:
             flag_column = getattr( metadata_columns, flag_name )
             filter_clauses.append( flag_column == util.string_as_bool( raw_value ) )
     # skip_tool_test is matched through a subquery on the skip_tool_test
     # table rather than a column comparison.
     raw_skip_tool_test = kwd.get( 'skip_tool_test', None )
     if raw_skip_tool_test is not None:
         wants_skipped = util.string_as_bool( raw_skip_tool_test )
         skipped_ids = select( [ trans.app.model.SkipToolTest.table.c.repository_metadata_id ] )
         membership_clause = trans.model.RepositoryMetadata.id.in_( skipped_ids )
         if wants_skipped:
             filter_clauses.append( membership_clause )
         else:
             filter_clauses.append( not_( membership_clause ) )
     # Generate and execute the query.
     try:
         revisions = trans.sa_session.query( trans.app.model.RepositoryMetadata ) \
                                     .filter( and_( *filter_clauses ) ) \
                                     .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id ) \
                                     .all()
         revision_dicts = []
         for repository_metadata in revisions:
             revision_dict = repository_metadata.get_api_value( view='collection',
                                                                value_mapper=default_value_mapper( trans, repository_metadata ) )
             revision_dict[ 'url' ] = web.url_for( controller='repository_revisions',
                                                   action='show',
                                                   id=trans.security.encode_id( repository_metadata.id ) )
             revision_dicts.append( revision_dict )
         return revision_dicts
     except Exception as e:
         message = "Error in the Tool Shed repository_revisions API in index: " + str( e )
         log.error( message, exc_info=True )
         trans.response.status = 500
         return message
Ejemplo n.º 19
0
def validate_repositories(app, info_only=False, verbosity=1):
    """
    Inspect records in the repository_metadata table that are associated with repositories of type TOOL_DEPENDENCY_DEFINITION
    to ensure they are valid and set the repository_metadata.do_not_test column value to True if the metadata is invalid.
    Each repository's metadata should look something like:
    "{"tool_dependencies": 
        {"libpng/1.2.5": {"name": "libpng", 
                          "readme": "README content", 
                          "type": "package", 
                          "version": "1.2.5"}}}"
    or:
    "{"repository_dependencies": 
        {"description": null, 
         "repository_dependencies": 
             [["http://localhost:9009", "package_libpng_1_2", "iuc", "5788512d4c0a", "True", "False"]]}, 
         "tool_dependencies": 
             {"libgd/2.1.0": 
                 {"name": "libgd", "readme": "text"}, 
              "libpng/1.2.5": 
                 {"name": "libpng", "type": "package", "version": "1.2.5"}}}"
    """
    invalid_metadata = 0
    records_checked = 0
    skip_metadata_ids = []
    start = time.time()
    valid_metadata = 0
    # Restrict testing to repositories of type TOOL_DEPENDENCY_DEFINITION
    tool_dependency_defintion_repository_ids = []
    for repository in app.sa_session.query( app.model.Repository ) \
                                    .filter( and_( app.model.Repository.table.c.deleted == False,
                                                   app.model.Repository.table.c.type == TOOL_DEPENDENCY_DEFINITION ) ):
        tool_dependency_defintion_repository_ids.append(repository.id)
    # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
    skip_metadata_ids = select(
        [app.model.SkipToolTest.table.c.repository_metadata_id])
    # Get the list of metadata records to check, restricting it to records that have not been flagged do_not_test.
    for repository_metadata in \
        app.sa_session.query( app.model.RepositoryMetadata ) \
                      .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
                                     app.model.RepositoryMetadata.table.c.do_not_test == False,
                                     app.model.RepositoryMetadata.table.c.repository_id.in_( tool_dependency_defintion_repository_ids ),
                                     not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
        records_checked += 1
        # Check the next repository revision.
        changeset_revision = str(repository_metadata.changeset_revision)
        name = repository.name
        owner = repository.user.username
        metadata = repository_metadata.metadata
        repository = repository_metadata.repository
        if verbosity >= 1:
            print '# -------------------------------------------------------------------------------------------'
            print '# Checking revision %s of %s owned by %s.' % (
                changeset_revision, name, owner)
        if metadata:
            # Valid metadata will undoubtedly have a tool_dependencies entry or  repository_dependencies entry.
            repository_dependencies = metadata.get('repository_dependencies',
                                                   None)
            tool_dependencies = metadata.get('tool_dependencies', None)
            if repository_dependencies or tool_dependencies:
                print 'Revision %s of %s owned by %s has valid metadata.' % (
                    changeset_revision, name, owner)
                valid_metadata += 1
            else:
                if verbosity >= 1:
                    print 'Revision %s of %s owned by %s has invalid metadata.' % (
                        changeset_revision, name, owner)
                invalid_metadata += 1
            if not info_only:
                # Create the tool_test_results_dict dictionary, using the dictionary from the previous test run if available.
                if repository_metadata.tool_test_results is not None:
                    # We'll listify the column value in case it uses the old approach of storing the results of only a single test run.
                    tool_test_results_dicts = listify(
                        repository_metadata.tool_test_results)
                else:
                    tool_test_results_dicts = []
                if tool_test_results_dicts:
                    # Inspect the tool_test_results_dict for the last test run in case it contains only a test_environment
                    # entry.  This will occur with multiple runs of this script without running the associated
                    # install_and_test_tool_sed_repositories.sh script which will further populate the tool_test_results_dict.
                    tool_test_results_dict = tool_test_results_dicts[0]
                    if len(tool_test_results_dict) <= 1:
                        # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
                        # a test_environment entry.  If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
                        # since it will be re-inserted later.
                        tool_test_results_dict = tool_test_results_dicts.pop(0)
                    elif len( tool_test_results_dict ) == 2 and \
                        'test_environment' in tool_test_results_dict and 'missing_test_components' in tool_test_results_dict:
                        # We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components".
                        # In this case, some tools are missing tests components while others are not.
                        tool_test_results_dict = tool_test_results_dicts.pop(0)
                    else:
                        # The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
                        tool_test_results_dict = {}
                else:
                    # Create a new dictionary for the most recent test run.
                    tool_test_results_dict = {}
                # Initialize the tool_test_results_dict dictionary with the information about the current test environment.
                test_environment_dict = tool_test_results_dict.get(
                    'test_environment', {})
                test_environment_dict[
                    'tool_shed_database_version'] = get_database_version(app)
                test_environment_dict[
                    'tool_shed_mercurial_version'] = __version__.version
                test_environment_dict[
                    'tool_shed_revision'] = get_repository_current_revision(
                        os.getcwd())
                tool_test_results_dict[
                    'test_environment'] = test_environment_dict
                # Store only the configured number of test runs.
                num_tool_test_results_saved = int(
                    app.config.num_tool_test_results_saved)
                if len(tool_test_results_dicts) >= num_tool_test_results_saved:
                    test_results_index = num_tool_test_results_saved - 1
                    new_tool_test_results_dicts = tool_test_results_dicts[:
                                                                          test_results_index]
                else:
                    new_tool_test_results_dicts = [
                        d for d in tool_test_results_dicts
                    ]
                # Insert the new element into the first position in the list.
                new_tool_test_results_dicts.insert(0, tool_test_results_dict)
                repository_metadata.tool_test_results = new_tool_test_results_dicts
                app.sa_session.add(repository_metadata)
                app.sa_session.flush()
    stop = time.time()
    print '# -------------------------------------------------------------------------------------------'
    print '# Checked %d repository revisions.' % records_checked
    print '# %d revisions found with valid tool dependency definition metadata.' % valid_metadata
    print '# %d revisions found with invalid tool dependency definition metadata.' % invalid_metadata
    if info_only:
        print '# Database not updated with any information from this run.'
    print "# Elapsed time: ", stop - start
    print "#############################################################################"
def check_and_flag_repositories( app, info_only=False, verbosity=1 ):
    '''
    This method will iterate through all records in the repository_metadata table, checking each one for tool metadata, 
    then checking the tool metadata for tests.
    Each tool's metadata should look something like:
    {
      "add_to_tool_panel": true,
      "description": "",
      "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3",
      "id": "tool_wrapper",
      "name": "Map with Tool Wrapper",
      "requirements": [],
      "tests": [
        {
          "inputs": [ [ "parameter", "value" ], [ "other_parameter", "other_value" ], ],
          "name": "Test-1",
          "outputs": [
            [
              "output_field_name",
              "output_file_name.bed"
            ]
          ],
          "required_files": [ '1.bed', '2.bed', '3.bed' ]
        }
      ],
      "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml",
      "tool_type": "default",
      "version": "1.2.3",
      "version_string_cmd": null
    }
    
    If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository)
    not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install
    and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
    not to be tested.
    
    TODO: Update this dict structure with the recently added components.
    
    If any error is encountered, the script will update the repository_metadata.tool_test_results attribute following this structure:
    {
        "test_environment":
            {
                 "galaxy_revision": "9001:abcd1234",
                 "galaxy_database_version": "114",
                 "tool_shed_revision": "9001:abcd1234",
                 "tool_shed_mercurial_version": "2.3.1",
                 "tool_shed_database_version": "17",
                 "python_version": "2.7.2",
                 "architecture": "x86_64",
                 "system": "Darwin 12.2.0"
            },
         "passed_tests":
            [
                {
                    "test_id": "The test ID, generated by twill",
                    "tool_id": "The tool ID that was tested",
                    "tool_version": "The tool version that was tested",
                },
            ]
        "failed_tests":
            [
                {
                    "test_id": "The test ID, generated by twill",
                    "tool_id": "The tool ID that was tested",
                    "tool_version": "The tool version that was tested",
                    "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
                    "traceback": "The captured traceback."
                },
            ]
        "installation_errors":
            {
                 'tool_dependencies':
                     [
                         {
                            'type': 'Type of tool dependency, e.g. package, set_environment, etc.', 
                            'name': 'Name of the tool dependency.', 
                            'version': 'Version if this is a package, otherwise blank.',
                            'error_message': 'The error message returned when installation was attempted.',
                         },
                     ],
                 'repository_dependencies':
                     [
                         {
                            'tool_shed': 'The tool shed that this repository was installed from.', 
                            'name': 'The name of the repository that failed to install.', 
                            'owner': 'Owner of the failed repository.',
                            'changeset_revision': 'Changeset revision of the failed repository.',
                            'error_message': 'The error message that was returned when the repository failed to install.',
                         },
                     ],
                 'current_repository':
                     [
                         {
                            'tool_shed': 'The tool shed that this repository was installed from.', 
                            'name': 'The name of the repository that failed to install.', 
                            'owner': 'Owner of the failed repository.',
                            'changeset_revision': 'Changeset revision of the failed repository.',
                            'error_message': 'The error message that was returned when the repository failed to install.',
                         },
                     ],
                {
                    "name": "The name of the repository.",
                    "owner": "The owner of the repository.",
                    "changeset_revision": "The changeset revision of the repository.",
                    "error_message": "The message stored in tool_dependency.error_message."
                },
            }
         "missing_test_components":
            [
                {
                    "tool_id": "The tool ID that missing components.",
                    "tool_version": "The version of the tool."
                    "tool_guid": "The guid of the tool."
                    "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
                },
            ]
    }
    '''
    start = time.time()
    skip_metadata_ids = []
    checked_repository_ids = []
    tool_count = 0
    has_tests = 0
    no_tests = 0
    no_tools = 0
    valid_revisions = 0
    invalid_revisions = 0
    records_checked = 0
    # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
    skip_metadata_ids = select( [ app.model.SkipToolTest.table.c.repository_metadata_id ] )
    # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
    # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
    # because it's redundant to test a revision that a user can't install.
    for metadata_record in app.sa_session.query( app.model.RepositoryMetadata ) \
                                         .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
                                                        app.model.RepositoryMetadata.table.c.includes_tools == True,
                                                        app.model.RepositoryMetadata.table.c.do_not_test == False,
                                                        not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
        records_checked += 1
        # Initialize the repository_status dict with the test environment, but leave the test_errors empty. 
        repository_status = {}
        if metadata_record.tool_test_results:
            repository_status = metadata_record.tool_test_results
        # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
        # or tests incorrectly labeled as invalid.
        missing_test_components = []
        if 'test_environment' in repository_status:
            repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] )
        else:
            repository_status[ 'test_environment' ] = get_test_environment()
        repository_status[ 'test_environment' ][ 'tool_shed_database_version' ] = get_database_version( app )
        repository_status[ 'test_environment' ][ 'tool_shed_mercurial_version' ] = __version__.version
        repository_status[ 'test_environment' ][ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
        name = metadata_record.repository.name
        owner = metadata_record.repository.user.username
        changeset_revision = str( metadata_record.changeset_revision )
        if metadata_record.repository.id not in checked_repository_ids:
            checked_repository_ids.append( metadata_record.repository.id )
        if verbosity >= 1:
            print '# -------------------------------------------------------------------------------------------'
            print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision,  name, owner ) 
        # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
        # only repositories that contain tools.
        if 'tools' not in metadata_record.metadata:
            continue
        else:
            has_test_data = False
            testable_revision_found = False
            # Clone the repository up to the changeset revision we're checking.
            repo_dir = metadata_record.repository.repo_path( app )
            repo = hg.repository( get_configured_ui(), repo_dir )
            work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-cafr"  )
            cloned_ok, error_message = clone_repository( repo_dir, work_dir, changeset_revision )
            if cloned_ok:
                # Iterate through all the directories in the cloned changeset revision and determine whether there's a
                # directory named test-data. If this directory is not present, update the metadata record for the changeset
                # revision we're checking.
                for root, dirs, files in os.walk( work_dir ):
                    if '.hg' in dirs:
                        dirs.remove( '.hg' )
                    if 'test-data' in dirs:
                        has_test_data = True
                        test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] )
                        break
            if verbosity >= 1:
                if not has_test_data:
                    print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
                else:
                    print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
                print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
                    ( changeset_revision,  name, owner ) 
            # Loop through all the tools in this metadata record, checking each one for defined functional tests.
            for tool_metadata in metadata_record.metadata[ 'tools' ]:
                tool_count += 1
                tool_id = tool_metadata[ 'id' ]
                tool_version = tool_metadata[ 'version' ]
                tool_guid = tool_metadata[ 'guid' ]
                if verbosity >= 2:
                    print "# Checking tool ID '%s' in changeset revision %s of %s." % \
                        ( tool_id, changeset_revision, name ) 
                # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
                # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
                # automated functional test framework produces.
                tool_has_tests = True
                if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
                    tool_has_tests = False
                    if verbosity >= 2:
                        print '# No functional tests defined for %s.' % tool_id
                    no_tests += 1
                else:
                    tool_has_tests = True
                    if verbosity >= 2:
                        print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
                            ( tool_id, changeset_revision, name ) 
                    has_tests += 1
                failure_reason = ''
                problem_found = False
                missing_test_files = []
                has_test_files = False
                if tool_has_tests and has_test_data:
                    missing_test_files = check_for_missing_test_files( tool_metadata[ 'tests' ], test_data_path )
                    if missing_test_files:
                        if verbosity >= 2:
                            print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
                                ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) ) 
                    else:
                        has_test_files = True
                if not has_test_data:
                    failure_reason += 'Repository does not have a test-data directory. '
                    problem_found = True
                if not tool_has_tests:
                    failure_reason += 'Functional test definitions missing for %s. ' % tool_id
                    problem_found = True
                if missing_test_files:
                    failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) )
                    problem_found = True
                test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
                                    missing_components=failure_reason )
                # The repository_metadata.tool_test_results attribute should always have the following structure:
                # {
                #     "test_environment":
                #         {
                #              "galaxy_revision": "9001:abcd1234",
                #              "galaxy_database_version": "114",
                #              "tool_shed_revision": "9001:abcd1234",
                #              "tool_shed_mercurial_version": "2.3.1",
                #              "tool_shed_database_version": "17",
                #              "python_version": "2.7.2",
                #              "architecture": "x86_64",
                #              "system": "Darwin 12.2.0"
                #         },
                #      "passed_tests":
                #         [
                #             {
                #                 "test_id": "The test ID, generated by twill",
                #                 "tool_id": "The tool ID that was tested",
                #                 "tool_version": "The tool version that was tested",
                #             },
                #         ]
                #     "failed_tests":
                #         [
                #             {
                #                 "test_id": "The test ID, generated by twill",
                #                 "tool_id": "The tool ID that was tested",
                #                 "tool_version": "The tool version that was tested",
                #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
                #                 "traceback": "The captured traceback."
                #             },
                #         ]
                #     "installation_errors":
                #         {
                #              'tool_dependencies':
                #                  [
                #                      {
                #                         'type': 'Type of tool dependency, e.g. package, set_environment, etc.', 
                #                         'name': 'Name of the tool dependency.', 
                #                         'version': 'Version if this is a package, otherwise blank.',
                #                         'error_message': 'The error message returned when installation was attempted.',
                #                      },
                #                  ],
                #              'repository_dependencies':
                #                  [
                #                      {
                #                         'tool_shed': 'The tool shed that this repository was installed from.', 
                #                         'name': 'The name of the repository that failed to install.', 
                #                         'owner': 'Owner of the failed repository.',
                #                         'changeset_revision': 'Changeset revision of the failed repository.',
                #                         'error_message': 'The error message that was returned when the repository failed to install.',
                #                      },
                #                  ],
                #              'current_repository':
                #                  [
                #                      {
                #                         'tool_shed': 'The tool shed that this repository was installed from.', 
                #                         'name': 'The name of the repository that failed to install.', 
                #                         'owner': 'Owner of the failed repository.',
                #                         'changeset_revision': 'Changeset revision of the failed repository.',
                #                         'error_message': 'The error message that was returned when the repository failed to install.',
                #                      },
                #                  ],
                #             {
                #                 "name": "The name of the repository.",
                #                 "owner": "The owner of the repository.",
                #                 "changeset_revision": "The changeset revision of the repository.",
                #                 "error_message": "The message stored in tool_dependency.error_message."
                #             },
                #         }
                #      "missing_test_components":
                #         [
                #             {
                #                 "tool_id": "The tool ID that missing components.",
                #                 "tool_version": "The version of the tool."
                #                 "tool_guid": "The guid of the tool."
                #                 "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
                #             },
                #         ]
                # }
                # 
                # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
                # than the list relevant to what it is testing.
                # Only append this error dict if it hasn't already been added.
                if problem_found:
                    if test_errors not in missing_test_components:
                        missing_test_components.append( test_errors )
                if tool_has_tests and has_test_files:
                    testable_revision_found = True
            # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
            if os.path.exists( work_dir ):
                shutil.rmtree( work_dir )
            if not missing_test_components:
                valid_revisions += 1
                if verbosity >= 1:
                    print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
            else:
                invalid_revisions += 1
                if verbosity >= 1:
                    print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
                    if verbosity >= 2:
                        for invalid_test in missing_test_components:
                            if 'missing_components' in invalid_test:
                                print '# %s' % invalid_test[ 'missing_components' ]
            if not info_only:
                # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
                # on which this script was run.
                if missing_test_components:
                    # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been 
                    # found in this revision, and:
                    # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
                    #    In this case, the revision will never be updated with the missing components, and re-testing it would be redundant.
                    # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable
                    #    revision. In this case, if the repository is updated with test data or functional tests, the downloadable
                    #    changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable
                    #    changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
                    #    In the install and test script, this behavior is slightly different, since we do want to always run functional
                    #    tests on the most recent downloadable changeset revision.
                    if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ) and not testable_revision_found:
                        metadata_record.do_not_test = True
                    metadata_record.tools_functionally_correct = False
                    metadata_record.missing_test_components = True
                    repository_status[ 'missing_test_components' ] = missing_test_components
                metadata_record.tool_test_results = repository_status
                metadata_record.time_last_tested = datetime.utcnow()
                app.sa_session.add( metadata_record )
                app.sa_session.flush()
    stop = time.time()
    print '# -------------------------------------------------------------------------------------------'
    print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, records_checked )
    print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
    print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
    print '# Found %d tools without functional tests.' % no_tests
    print '# Found %d tools with functional tests.' % has_tests
    if info_only:
        print '# Database not updated, info_only set.'
    print "# Elapsed time: ", stop - start
    print "#############################################################################" 
# Ejemplo n.º 21 — scraped-page example separator (not code); commented out so the file parses.
# 0
def check_and_flag_repositories(app, info_only=False, verbosity=1):
    '''
    This method will iterate through all records in the repository_metadata table, checking each one for tool metadata, 
    then checking the tool metadata for tests.
    Each tool's metadata should look something like:
    {
      "add_to_tool_panel": true,
      "description": "",
      "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3",
      "id": "tool_wrapper",
      "name": "Map with Tool Wrapper",
      "requirements": [],
      "tests": [
        {
          "inputs": [ [ "parameter", "value" ], [ "other_parameter", "other_value" ], ],
          "name": "Test-1",
          "outputs": [
            [
              "output_field_name",
              "output_file_name.bed"
            ]
          ],
          "required_files": [ '1.bed', '2.bed', '3.bed' ]
        }
      ],
      "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml",
      "tool_type": "default",
      "version": "1.2.3",
      "version_string_cmd": null
    }
    
    If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository)
    not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install
    and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
    not to be tested.
    
    TODO: Update this dict structure with the recently added components.
    
    If any error is encountered, the script will update the repository_metadata.tool_test_results attribute following this structure:
    {
        "test_environment":
            {
                 "galaxy_revision": "9001:abcd1234",
                 "galaxy_database_version": "114",
                 "tool_shed_revision": "9001:abcd1234",
                 "tool_shed_mercurial_version": "2.3.1",
                 "tool_shed_database_version": "17",
                 "python_version": "2.7.2",
                 "architecture": "x86_64",
                 "system": "Darwin 12.2.0"
            },
         "passed_tests":
            [
                {
                    "test_id": "The test ID, generated by twill",
                    "tool_id": "The tool ID that was tested",
                    "tool_version": "The tool version that was tested",
                },
            ]
        "failed_tests":
            [
                {
                    "test_id": "The test ID, generated by twill",
                    "tool_id": "The tool ID that was tested",
                    "tool_version": "The tool version that was tested",
                    "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
                    "traceback": "The captured traceback."
                },
            ]
        "installation_errors":
            {
                 'tool_dependencies':
                     [
                         {
                            'type': 'Type of tool dependency, e.g. package, set_environment, etc.', 
                            'name': 'Name of the tool dependency.', 
                            'version': 'Version if this is a package, otherwise blank.',
                            'error_message': 'The error message returned when installation was attempted.',
                         },
                     ],
                 'repository_dependencies':
                     [
                         {
                            'tool_shed': 'The tool shed that this repository was installed from.', 
                            'name': 'The name of the repository that failed to install.', 
                            'owner': 'Owner of the failed repository.',
                            'changeset_revision': 'Changeset revision of the failed repository.',
                            'error_message': 'The error message that was returned when the repository failed to install.',
                         },
                     ],
                 'current_repository':
                     [
                         {
                            'tool_shed': 'The tool shed that this repository was installed from.', 
                            'name': 'The name of the repository that failed to install.', 
                            'owner': 'Owner of the failed repository.',
                            'changeset_revision': 'Changeset revision of the failed repository.',
                            'error_message': 'The error message that was returned when the repository failed to install.',
                         },
                     ],
                {
                    "name": "The name of the repository.",
                    "owner": "The owner of the repository.",
                    "changeset_revision": "The changeset revision of the repository.",
                    "error_message": "The message stored in tool_dependency.error_message."
                },
            }
         "missing_test_components":
            [
                {
                    "tool_id": "The tool ID that missing components.",
                    "tool_version": "The version of the tool."
                    "tool_guid": "The guid of the tool."
                    "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
                },
            ]
    }
    '''
    start = time.time()
    skip_metadata_ids = []
    checked_repository_ids = []
    tool_count = 0
    has_tests = 0
    no_tests = 0
    no_tools = 0
    valid_revisions = 0
    invalid_revisions = 0
    records_checked = 0
    # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
    skip_metadata_ids = select(
        [app.model.SkipToolTest.table.c.repository_metadata_id])
    # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
    # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
    # because it's redundant to test a revision that a user can't install.
    for metadata_record in app.sa_session.query( app.model.RepositoryMetadata ) \
                                         .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
                                                        app.model.RepositoryMetadata.table.c.includes_tools == True,
                                                        app.model.RepositoryMetadata.table.c.do_not_test == False,
                                                        not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
        records_checked += 1
        # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
        repository_status = {}
        if metadata_record.tool_test_results:
            repository_status = metadata_record.tool_test_results
        # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
        # or tests incorrectly labeled as invalid.
        missing_test_components = []
        if 'test_environment' in repository_status:
            repository_status['test_environment'] = get_test_environment(
                repository_status['test_environment'])
        else:
            repository_status['test_environment'] = get_test_environment()
        repository_status['test_environment'][
            'tool_shed_database_version'] = get_database_version(app)
        repository_status['test_environment'][
            'tool_shed_mercurial_version'] = __version__.version
        repository_status['test_environment'][
            'tool_shed_revision'] = get_repository_current_revision(
                os.getcwd())
        name = metadata_record.repository.name
        owner = metadata_record.repository.user.username
        changeset_revision = str(metadata_record.changeset_revision)
        if metadata_record.repository.id not in checked_repository_ids:
            checked_repository_ids.append(metadata_record.repository.id)
        if verbosity >= 1:
            print '# -------------------------------------------------------------------------------------------'
            print '# Now checking revision %s of %s, owned by %s.' % (
                changeset_revision, name, owner)
        # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
        # only repositories that contain tools.
        if 'tools' not in metadata_record.metadata:
            continue
        else:
            has_test_data = False
            testable_revision_found = False
            # Clone the repository up to the changeset revision we're checking.
            repo_dir = metadata_record.repository.repo_path(app)
            repo = hg.repository(get_configured_ui(), repo_dir)
            work_dir = tempfile.mkdtemp(prefix="tmp-toolshed-cafr")
            cloned_ok, error_message = clone_repository(
                repo_dir, work_dir, changeset_revision)
            if cloned_ok:
                # Iterate through all the directories in the cloned changeset revision and determine whether there's a
                # directory named test-data. If this directory is not present, update the metadata record for the changeset
                # revision we're checking.
                for root, dirs, files in os.walk(work_dir):
                    if '.hg' in dirs:
                        dirs.remove('.hg')
                    if 'test-data' in dirs:
                        has_test_data = True
                        test_data_path = os.path.join(
                            root, dirs[dirs.index('test-data')])
                        break
            if verbosity >= 1:
                if not has_test_data:
                    print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % (
                        changeset_revision, name, owner)
                else:
                    print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % (
                        changeset_revision, name, owner)
                print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
                    ( changeset_revision,  name, owner )
            # Loop through all the tools in this metadata record, checking each one for defined functional tests.
            for tool_metadata in metadata_record.metadata['tools']:
                tool_count += 1
                tool_id = tool_metadata['id']
                tool_version = tool_metadata['version']
                tool_guid = tool_metadata['guid']
                if verbosity >= 2:
                    print "# Checking tool ID '%s' in changeset revision %s of %s." % \
                        ( tool_id, changeset_revision, name )
                # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
                # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
                # automated functional test framework produces.
                tool_has_tests = True
                if 'tests' not in tool_metadata or not tool_metadata['tests']:
                    tool_has_tests = False
                    if verbosity >= 2:
                        print '# No functional tests defined for %s.' % tool_id
                    no_tests += 1
                else:
                    tool_has_tests = True
                    if verbosity >= 2:
                        print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
                            ( tool_id, changeset_revision, name )
                    has_tests += 1
                failure_reason = ''
                problem_found = False
                missing_test_files = []
                has_test_files = False
                if tool_has_tests and has_test_data:
                    missing_test_files = check_for_missing_test_files(
                        tool_metadata['tests'], test_data_path)
                    if missing_test_files:
                        if verbosity >= 2:
                            print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
                                ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
                    else:
                        has_test_files = True
                if not has_test_data:
                    failure_reason += 'Repository does not have a test-data directory. '
                    problem_found = True
                if not tool_has_tests:
                    failure_reason += 'Functional test definitions missing for %s. ' % tool_id
                    problem_found = True
                if missing_test_files:
                    failure_reason += 'One or more test files are missing for tool %s: %s' % (
                        tool_id, ', '.join(missing_test_files))
                    problem_found = True
                test_errors = dict(tool_id=tool_id,
                                   tool_version=tool_version,
                                   tool_guid=tool_guid,
                                   missing_components=failure_reason)
                # The repository_metadata.tool_test_results attribute should always have the following structure:
                # {
                #     "test_environment":
                #         {
                #              "galaxy_revision": "9001:abcd1234",
                #              "galaxy_database_version": "114",
                #              "tool_shed_revision": "9001:abcd1234",
                #              "tool_shed_mercurial_version": "2.3.1",
                #              "tool_shed_database_version": "17",
                #              "python_version": "2.7.2",
                #              "architecture": "x86_64",
                #              "system": "Darwin 12.2.0"
                #         },
                #      "passed_tests":
                #         [
                #             {
                #                 "test_id": "The test ID, generated by twill",
                #                 "tool_id": "The tool ID that was tested",
                #                 "tool_version": "The tool version that was tested",
                #             },
                #         ],
                #     "failed_tests":
                #         [
                #             {
                #                 "test_id": "The test ID, generated by twill",
                #                 "tool_id": "The tool ID that was tested",
                #                 "tool_version": "The tool version that was tested",
                #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was.",
                #                 "traceback": "The captured traceback."
                #             },
                #         ],
                #     "installation_errors":
                #         {
                #              'tool_dependencies':
                #                  [
                #                      {
                #                         'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
                #                         'name': 'Name of the tool dependency.',
                #                         'version': 'Version if this is a package, otherwise blank.',
                #                         'error_message': 'The error message returned when installation was attempted.',
                #                      },
                #                  ],
                #              'repository_dependencies':
                #                  [
                #                      {
                #                         'tool_shed': 'The tool shed that this repository was installed from.',
                #                         'name': 'The name of the repository that failed to install.',
                #                         'owner': 'Owner of the failed repository.',
                #                         'changeset_revision': 'Changeset revision of the failed repository.',
                #                         'error_message': 'The error message that was returned when the repository failed to install.',
                #                      },
                #                  ],
                #              'current_repository':
                #                  [
                #                      {
                #                         'tool_shed': 'The tool shed that this repository was installed from.',
                #                         'name': 'The name of the repository that failed to install.',
                #                         'owner': 'Owner of the failed repository.',
                #                         'changeset_revision': 'Changeset revision of the failed repository.',
                #                         'error_message': 'The error message that was returned when the repository failed to install.',
                #                      },
                #                  ],
                #         },
                #      "missing_test_components":
                #         [
                #             {
                #                 "tool_id": "The tool ID that is missing components.",
                #                 "tool_version": "The version of the tool.",
                #                 "tool_guid": "The guid of the tool.",
                #                 "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
                #             },
                #         ]
                # }
                #
                # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
                # than the list relevant to what it is testing.
                # Only append this error dict if it hasn't already been added.
                if problem_found:
                    if test_errors not in missing_test_components:
                        missing_test_components.append(test_errors)
                if tool_has_tests and has_test_files:
                    testable_revision_found = True
            # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
            if os.path.exists(work_dir):
                shutil.rmtree(work_dir)
            if not missing_test_components:
                valid_revisions += 1
                if verbosity >= 1:
                    print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % (
                        changeset_revision, name, owner)
            else:
                invalid_revisions += 1
                if verbosity >= 1:
                    print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % (
                        changeset_revision, name, owner)
                    if verbosity >= 2:
                        for invalid_test in missing_test_components:
                            if 'missing_components' in invalid_test:
                                print '# %s' % invalid_test[
                                    'missing_components']
            if not info_only:
                # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
                # on which this script was run.
                if missing_test_components:
                    # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
                    # found in this revision, and:
                    # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
                    #    In this case, the revision will never be updated with the missing components, and re-testing it would be redundant.
                    # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable
                    #    revision. In this case, if the repository is updated with test data or functional tests, the downloadable
                    #    changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable
                    #    changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
                    #    In the install and test script, this behavior is slightly different, since we do want to always run functional
                    #    tests on the most recent downloadable changeset revision.
                    if should_set_do_not_test_flag(
                            app, metadata_record.repository, changeset_revision
                    ) and not testable_revision_found:
                        metadata_record.do_not_test = True
                    metadata_record.tools_functionally_correct = False
                    metadata_record.missing_test_components = True
                    repository_status[
                        'missing_test_components'] = missing_test_components
                metadata_record.tool_test_results = repository_status
                metadata_record.time_last_tested = datetime.utcnow()
                app.sa_session.add(metadata_record)
                app.sa_session.flush()
    stop = time.time()
    print '# -------------------------------------------------------------------------------------------'
    print '# Checked %d repositories with %d tools in %d changeset revisions.' % (
        len(checked_repository_ids), tool_count, records_checked)
    print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
    print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
    print '# Found %d tools without functional tests.' % no_tests
    print '# Found %d tools with functional tests.' % has_tests
    if info_only:
        print '# Database not updated, info_only set.'
    print "# Elapsed time: ", stop - start
    print "#############################################################################"