Ejemplo n.º 1
0
def upload_tar( trans, rdah, tdah, repository, tar, uploaded_file, upload_point, remove_repo_files_not_in_tar,
                commit_message, new_repo_alert ):
    """
    Upload a tar archive of files into the received repository.

    Validates the archive contents, extracts the valid members at the upload
    point, rewrites repository/tool dependency definition files whose toolshed
    or changeset_revision attributes are missing, and commits the changes.

    Returns a 6-tuple of
    ( ok, message, files_to_remove, content_alert_str,
      undesirable_dirs_removed, undesirable_files_removed ).
    """
    repo_dir = repository.repo_path( trans.app )
    # Ensure the repository exists on disk before extracting into it.
    hg_util.get_repo_for_repository( trans.app, repository=None, repo_path=repo_dir, create=False )
    undesirable_dirs_removed = 0
    undesirable_files_removed = 0
    check_results = commit_util.check_archive( repository, tar )
    if check_results.invalid:
        tar.close()
        uploaded_file.close()
        message = '%s Invalid paths were: %s' % (
            ' '.join( check_results.errors ), ', '.join( check_results.invalid ) )
        return False, message, [], '', undesirable_dirs_removed, undesirable_files_removed
    if upload_point is not None:
        full_path = os.path.abspath( os.path.join( repo_dir, upload_point ) )
    else:
        full_path = os.path.abspath( repo_dir )
    undesirable_files_removed = len( check_results.undesirable_files )
    undesirable_dirs_removed = len( check_results.undesirable_dirs )
    filenames_in_archive = [ ti.name for ti in check_results.valid ]
    # Extract the uploaded tar to the upload point within the repository hierarchy.
    tar.extractall( path=full_path, members=check_results.valid )
    tar.close()
    uploaded_file.close()
    for filename in filenames_in_archive:
        uploaded_file_name = os.path.join( full_path, filename )
        if os.path.split( uploaded_file_name )[ -1 ] == rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
            # Inspect the contents of the file to see if toolshed or changeset_revision attributes
            # are missing and if so, set them appropriately.
            altered, root_elem, error_message = rdah.handle_tag_attributes( uploaded_file_name )
            if error_message:
                # BUG FIX: previously returned [], [] in the last two tuple slots,
                # inconsistent with the int counters returned by the other error path.
                return False, error_message, [], '', undesirable_dirs_removed, undesirable_files_removed
            elif altered:
                tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                shutil.move( tmp_filename, uploaded_file_name )
        elif os.path.split( uploaded_file_name )[ -1 ] == rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME:
            # Inspect the contents of the file to see if toolshed or changeset_revision
            # attributes are missing and if so, set them appropriately.
            altered, root_elem, error_message = tdah.handle_tag_attributes( uploaded_file_name )
            if error_message:
                # BUG FIX: same return-shape inconsistency as above.
                return False, error_message, [], '', undesirable_dirs_removed, undesirable_files_removed
            if altered:
                tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                shutil.move( tmp_filename, uploaded_file_name )
    return commit_util.handle_directory_changes( trans.app,
                                                 trans.request.host,
                                                 trans.user.username,
                                                 repository,
                                                 full_path,
                                                 filenames_in_archive,
                                                 remove_repo_files_not_in_tar,
                                                 new_repo_alert,
                                                 commit_message,
                                                 undesirable_dirs_removed,
                                                 undesirable_files_removed )
Ejemplo n.º 2
0
def create_repository( app, name, type, description, long_description, user_id, category_ids=None, remote_repository_url=None, homepage_url=None ):
    """
    Create a new ToolShed repository.

    Adds the database record, creates an admin role, initializes the on-disk
    mercurial repository and its hgweb.config / hgrc entries, associates the
    repository with any received categories, and registers it in the
    repository registry.

    Returns a ( repository, message ) tuple.
    """
    # BUG FIX: category_ids previously defaulted to a mutable list ([]), which is
    # shared across calls; default to None and normalize locally instead.
    if category_ids is None:
        category_ids = []
    sa_session = app.model.context.current
    # Add the repository record to the database.
    repository = app.model.Repository( name=name,
                                       type=type,
                                       remote_repository_url=remote_repository_url,
                                       homepage_url=homepage_url,
                                       description=description,
                                       long_description=long_description,
                                       user_id=user_id )
    # Flush to get the id.
    sa_session.add( repository )
    sa_session.flush()
    # Create an admin role for the repository.
    create_repository_admin_role( app, repository )
    # Determine the repository's repo_path on disk (renamed from `dir`, which
    # shadowed the builtin).
    hashed_dir = os.path.join( app.config.file_path, *directory_hash_id( repository.id ) )
    # Create directory if it does not exist.
    if not os.path.exists( hashed_dir ):
        os.makedirs( hashed_dir )
    # Define repo name inside hashed directory.
    repository_path = os.path.join( hashed_dir, "repo_%d" % repository.id )
    # Create local repository directory.
    if not os.path.exists( repository_path ):
        os.makedirs( repository_path )
    # Create the local mercurial repository.
    hg_util.get_repo_for_repository( app, repository=None, repo_path=repository_path, create=True )
    # Add an entry in the hgweb.config file for the local repository.
    lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
    app.hgweb_config_manager.add_entry( lhs, repository_path )
    # Create a .hg/hgrc file for the local repository.
    hg_util.create_hgrc_file( app, repository )
    flush_needed = False
    if category_ids:
        # Create category associations
        for category_id in category_ids:
            category = sa_session.query( app.model.Category ) \
                                 .get( app.security.decode_id( category_id ) )
            rca = app.model.RepositoryCategoryAssociation( repository, category )
            sa_session.add( rca )
            flush_needed = True
    if flush_needed:
        sa_session.flush()
    # Update the repository registry.
    app.repository_registry.add_entry( repository )
    message = "Repository <b>%s</b> has been created." % escape( str( repository.name ) )
    return repository, message
Ejemplo n.º 3
0
    def get_ordered_installable_revisions( self, trans, name, owner, **kwd ):
        """
        GET /api/repositories/get_ordered_installable_revisions

        :param name: the name of the Repository
        :param owner: the owner of the Repository

        Returns the ordered list of changeset revision hash strings that are associated with installable revisions.
        As in the changelog, the list is ordered oldest to newest.
        """
        # Example URL: http://localhost:9009/api/repositories/get_installable_revisions?name=add_column&owner=test
        if not ( name and owner ):
            # Both query parameters are required; log the problem and return nothing.
            error_message = "Error in the Tool Shed repositories API in get_ordered_installable_revisions: "
            error_message += "invalid name %s or owner %s received." % ( str( name ), str( owner ) )
            log.debug( error_message )
            return []
        # Get the repository information.
        repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
        if repository is None:
            error_message = "Error in the Tool Shed repositories API in get_ordered_installable_revisions: "
            error_message += "cannot locate repository %s owned by %s." % ( str( name ), str( owner ) )
            log.debug( error_message )
            return []
        repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
        return suc.get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
Ejemplo n.º 4
0
 def get_version_lineage_for_tool( self, repository_id, repository_metadata, guid ):
     """
     Return the tool version lineage chain in descendant order for the received
     guid contained in the received repository_metadata.tool_versions.  This function
     is called only from the Tool Shed.
     """
     repository = suc.get_repository_by_id( self.app, repository_id )
     repo = hg_util.get_repo_for_repository( self.app, repository=repository, repo_path=None, create=False )
     # Initialize the tool lineage with the received guid itself.
     version_lineage = [ guid ]
     # Get all ancestor guids of the received guid.  Each revision's tool_versions
     # dictionary maps a child guid to its parent guid, so walking the changelog
     # backwards and appending keeps older versions later in the list.
     current_child_guid = guid
     for changeset in hg_util.reversed_upper_bounded_changelog( repo, repository_metadata.changeset_revision ):
         ctx = repo.changectx( changeset )
         rm = suc.get_repository_metadata_by_changeset_revision( self.app, repository_id, str( ctx ) )
         if rm:
             parent_guid = rm.tool_versions.get( current_child_guid, None )
             if parent_guid:
                 version_lineage.append( parent_guid )
                 current_child_guid = parent_guid
     # Get all descendant guids of the received guid.
     current_parent_guid = guid
     for changeset in hg_util.reversed_lower_upper_bounded_changelog( repo,
                                                                      repository_metadata.changeset_revision,
                                                                      repository.tip( self.app ) ):
         ctx = repo.changectx( changeset )
         rm = suc.get_repository_metadata_by_changeset_revision( self.app, repository_id, str( ctx ) )
         if rm:
             tool_versions = rm.tool_versions
             for child_guid, parent_guid in tool_versions.items():
                 if parent_guid == current_parent_guid:
                     # Insert at the front so newer versions lead the lineage.
                     version_lineage.insert( 0, child_guid )
                     current_parent_guid = child_guid
                     break
     return version_lineage
Ejemplo n.º 5
0
def get_latest_downloadable_repository_metadata( trans, repository ):
    """
    Return the latest downloadable repository_metadata record for the received repository.  This will
    return repositories of type unrestricted as well as types repository_suite_definition and
    tool_dependency_definition.  Returns None when no downloadable metadata exists.
    """
    encoded_repository_id = trans.security.encode_id( repository.id )
    repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
    tip_ctx = str( repo.changectx( repo.changelog.tip() ) )
    repository_metadata = None
    try:
        repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans.app, encoded_repository_id, tip_ctx )
        if repository_metadata is not None and repository_metadata.downloadable:
            return repository_metadata
        return None
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only ordinary errors should trigger the fallback to
    # the previous downloadable metadata revision.
    except Exception:
        latest_downloadable_revision = metadata_util.get_previous_metadata_changeset_revision( repository,
                                                                                               repo,
                                                                                               tip_ctx,
                                                                                               downloadable=True )
        if latest_downloadable_revision == hg_util.INITIAL_CHANGELOG_HASH:
            return None
        repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans.app,
                                                                                 encoded_repository_id,
                                                                                 latest_downloadable_revision )
        if repository_metadata is not None and repository_metadata.downloadable:
            return repository_metadata
        return None
Ejemplo n.º 6
0
 def upload( self, trans, **kwd ):
     # Web controller action handling a repository upload request.  The upload may
     # arrive as posted file data, a plain http(s) URL, or an hg clone URL.
     # NOTE(review): this block appears truncated here; the remainder of the
     # action is not visible in this view.
     message = escape( kwd.get( 'message', '' ) )
     status = kwd.get( 'status', 'done' )
     commit_message = escape( kwd.get( 'commit_message', 'Uploaded'  ) )
     category_ids = util.listify( kwd.get( 'category_id', '' ) )
     categories = suc.get_categories( trans.app )
     repository_id = kwd.get( 'repository_id', '' )
     repository = suc.get_repository_in_tool_shed( trans.app, repository_id )
     repo_dir = repository.repo_path( trans.app )
     repo = hg_util.get_repo_for_repository( trans.app, repository=None, repo_path=repo_dir, create=False )
     uncompress_file = util.string_as_bool( kwd.get( 'uncompress_file', 'true' ) )
     remove_repo_files_not_in_tar = util.string_as_bool( kwd.get( 'remove_repo_files_not_in_tar', 'true' ) )
     uploaded_file = None
     upload_point = commit_util.get_upload_point( repository, **kwd )
     tip = repository.tip( trans.app )
     file_data = kwd.get( 'file_data', '' )
     url = kwd.get( 'url', '' )
     # Part of the upload process is sending email notification to those that have registered to
     # receive them.  One scenario occurs when the first change set is produced for the repository.
     # See the suc.handle_email_alerts() method for the definition of the scenarios.
     new_repo_alert = repository.is_new( trans.app )
     uploaded_directory = None
     if kwd.get( 'upload_button', False ):
         if file_data == '' and url == '':
             message = 'No files were entered on the upload form.'
             status = 'error'
             uploaded_file = None
         elif url and url.startswith( 'hg' ):
             # Use mercurial clone to fetch repository, contents will then be copied over.
             uploaded_directory = tempfile.mkdtemp()
             # Rewrite the hg:// style URL to http:// for the clone command.
             repo_url = 'http%s' % url[ len( 'hg' ): ]
             repo_url = repo_url.encode( 'ascii', 'replace' )
             try:
                 commands.clone( hg_util.get_configured_ui(), repo_url, uploaded_directory )
             except Exception, e:
                 message = 'Error uploading via mercurial clone: %s' % basic_util.to_html_string( str( e ) )
                 status = 'error'
                 basic_util.remove_dir( uploaded_directory )
                 uploaded_directory = None
         elif url:
             # Plain URL upload: stream the remote file into a local temp file.
             valid_url = True
             try:
                 stream = urllib.urlopen( url )
             except Exception, e:
                 valid_url = False
                 message = 'Error uploading file via http: %s' % str( e )
                 status = 'error'
                 uploaded_file = None
             if valid_url:
                 fd, uploaded_file_name = tempfile.mkstemp()
                 uploaded_file = open( uploaded_file_name, 'wb' )
                 while 1:
                     chunk = stream.read( util.CHUNK_SIZE )
                     if not chunk:
                         break
                     uploaded_file.write( chunk )
                 uploaded_file.flush()
                 uploaded_file_filename = url.split( '/' )[ -1 ]
                 isempty = os.path.getsize( os.path.abspath( uploaded_file_name ) ) == 0
Ejemplo n.º 7
0
def build_readme_files_dict( app, repository, changeset_revision, metadata, tool_path=None ):
    """
    Return a dictionary of valid readme file name <-> readme file content pairs for all readme files defined in the received metadata.  Since the
    received changeset_revision (which is associated with the received metadata) may not be the latest installable changeset revision, the README
    file contents may not be available on disk.  This method is used by both Galaxy and the Tool Shed.
    """
    # NOTE(review): this block appears truncated in this view (no visible return
    # of readme_files_dict).
    if app.name == 'galaxy':
        can_use_disk_files = True
    else:
        # In the Tool Shed, disk files are only valid for the latest downloadable revision.
        repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
        latest_downloadable_changeset_revision = suc.get_latest_downloadable_changeset_revision( app, repository, repo )
        can_use_disk_files = changeset_revision == latest_downloadable_changeset_revision
    readme_files_dict = {}
    if metadata:
        if 'readme_files' in metadata:
            for relative_path_to_readme_file in metadata[ 'readme_files' ]:
                readme_file_name = os.path.split( relative_path_to_readme_file )[ 1 ]
                if can_use_disk_files:
                    if tool_path:
                        full_path_to_readme_file = os.path.abspath( os.path.join( tool_path, relative_path_to_readme_file ) )
                    else:
                        full_path_to_readme_file = os.path.abspath( relative_path_to_readme_file )
                    text = None
                    try:
                        f = open( full_path_to_readme_file, 'r' )
                        text = unicodify( f.read() )
                        f.close()
                    except Exception, e:
                        log.exception( "Error reading README file '%s' from disk: %s" % ( str( relative_path_to_readme_file ), str( e ) ) )
                        text = None
                    if text:
                        # presumably bounds very large README content — see basic_util.size_string.
                        text_of_reasonable_length = basic_util.size_string( text )
                        if text_of_reasonable_length.find( '.. image:: ' ) >= 0:
                            # Handle image display for README files that are contained in repositories in the tool shed or installed into Galaxy.
                            lock = threading.Lock()
                            lock.acquire( True )
                            try:
                                text_of_reasonable_length = suc.set_image_paths( app,
                                                                                 app.security.encode_id( repository.id ),
                                                                                 text_of_reasonable_length )
                            except Exception, e:
                                log.exception( "Exception in build_readme_files_dict, so images may not be properly displayed:\n%s" % str( e ) )
                            finally:
                                lock.release()
                        if readme_file_name.endswith( '.rst' ):
                            # Render reStructuredText READMEs to HTML through a template.
                            text_of_reasonable_length = Template( rst_to_html( text_of_reasonable_length ),
                                                                  input_encoding='utf-8',
                                                                  output_encoding='utf-8',
                                                                  default_filters=[ 'decode.utf8' ],
                                                                  encoding_errors='replace' )
                            text_of_reasonable_length = text_of_reasonable_length.render( static_path=web.url_for( '/static' ),
                                                                                          host_url=web.url_for( '/', qualified=True ) )
                            text_of_reasonable_length = unicodify( text_of_reasonable_length )
                        else:
                            text_of_reasonable_length = basic_util.to_html_string( text_of_reasonable_length )
                        readme_files_dict[ readme_file_name ] = text_of_reasonable_length
Ejemplo n.º 8
0
def get_latest_repository_metadata(app, decoded_repository_id, downloadable=False):
    """Get last metadata defined for a specified repository from the database."""
    sa_session = app.model.context.current
    repository = sa_session.query(app.model.Repository).get(decoded_repository_id)
    repo = hg_util.get_repo_for_repository(app, repository=repository, repo_path=None, create=False)
    # Pick the revision-selection helper based on the downloadable flag.
    if downloadable:
        select_revision = get_latest_downloadable_changeset_revision
    else:
        select_revision = get_latest_changeset_revision
    changeset_revision = select_revision(app, repository, repo)
    encoded_id = app.security.encode_id(repository.id)
    return get_repository_metadata_by_changeset_revision(app, encoded_id, changeset_revision)
Ejemplo n.º 9
0
def get_repo_info_dict( app, user, repository_id, changeset_revision ):
    """
    Build the repo_info_dict for a repository changeset revision and report what
    kinds of content (tools, tool dependencies, repository dependencies) that
    revision includes.
    """
    repository = suc.get_repository_in_tool_shed( app, repository_id )
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed( user, repository )
    repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
                                                                             repository_id,
                                                                             changeset_revision )
    if not repository_metadata:
        # The received changeset_revision is no longer installable, so get the next
        # changeset_revision in the repository's changelog.  This generally occurs
        # only with repositories of type repository_suite_definition or
        # tool_dependency_definition.
        next_downloadable_changeset_revision = \
            suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
        if next_downloadable_changeset_revision:
            repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
                                                                                     repository_id,
                                                                                     next_downloadable_changeset_revision )
    if repository_metadata:
        # For now, we'll always assume that we'll get repository_metadata, but if we discover our assumption
        # is not valid we'll have to enhance the callers to handle repository_metadata values of None in the
        # returned repo_info_dict.
        metadata = repository_metadata.metadata
        includes_tools = 'tools' in metadata
        includes_tool_dependencies = 'tool_dependencies' in metadata
        includes_tools_for_display_in_tool_panel = repository_metadata.includes_tools_for_display_in_tool_panel
        all_repository_dependencies = metadata.get( 'repository_dependencies', {} ).get( 'repository_dependencies', [] )
        has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td = \
            suc.get_repository_dependency_types( all_repository_dependencies )
    else:
        # Here's where we may have to handle enhancements to the callers. See above comment.
        includes_tools = False
        includes_tool_dependencies = False
        includes_tools_for_display_in_tool_panel = False
        has_repository_dependencies = False
        has_repository_dependencies_only_if_compiling_contained_td = False
    ctx = hg_util.get_changectx_for_changeset( repo, changeset_revision )
    repo_info_dict = create_repo_info_dict( app=app,
                                            repository_clone_url=repository_clone_url,
                                            changeset_revision=changeset_revision,
                                            ctx_rev=str( ctx.rev() ),
                                            repository_owner=repository.user.username,
                                            repository_name=repository.name,
                                            repository=repository,
                                            repository_metadata=repository_metadata,
                                            tool_dependencies=None,
                                            repository_dependencies=None )
    return repo_info_dict, includes_tools, includes_tool_dependencies, includes_tools_for_display_in_tool_panel, \
        has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td
Ejemplo n.º 10
0
def get_dependencies_for_metadata_revision( app, metadata ):
    """
    Collect the repository_metadata record for each repository dependency
    declared in the received metadata, skipping dependencies for which no
    downloadable changeset revision can be resolved.
    """
    dependencies = []
    for shed, name, owner, changeset, prior, _ in metadata[ 'repository_dependencies' ]:
        required_repository = tool_shed.util.repository_util.get_repository_by_name_and_owner( app, name, owner )
        required_repo = hg_util.get_repo_for_repository( app, repository=required_repository, repo_path=None, create=False )
        updated_changeset = get_next_downloadable_changeset_revision( required_repository, required_repo, changeset )
        if updated_changeset is not None:
            encoded_id = app.security.encode_id( required_repository.id )
            metadata_entry = get_repository_metadata_by_changeset_revision( app, encoded_id, updated_changeset )
            dependencies.append( metadata_entry )
    return dependencies
Ejemplo n.º 11
0
def get_latest_downloadable_changeset_revision( app, repository, repo=None ):
    """
    Return the most recent downloadable changeset revision hash for the received
    repository, falling back to hg_util.INITIAL_CHANGELOG_HASH when none exists.
    """
    if repo is None:
        repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    repository_tip = repository.tip( app )
    tip_metadata = get_repository_metadata_by_changeset_revision( app, app.security.encode_id( repository.id ), repository_tip )
    # Prefer the tip itself when it has downloadable metadata.
    if tip_metadata and tip_metadata.downloadable:
        return repository_tip
    metadata_revisions = [ revision[ 1 ] for revision in get_metadata_revisions( repository, repo ) ]
    if not metadata_revisions:
        return hg_util.INITIAL_CHANGELOG_HASH
    return metadata_revisions[ -1 ]
 def load_tool_from_changeset_revision( self, repository_id, changeset_revision, tool_config_filename ):
     """
     Return a loaded tool whose tool config file name (e.g., filtering.xml) is the value
     of tool_config_filename.  The value of changeset_revision is a valid (downloadable)
     changeset revision.  The tool config will be located in the repository manifest between
     the received valid changeset revision and the first changeset revision in the repository,
     searching backwards.  Returns a ( repository, tool, message ) tuple.
     """
     # Remember the configured tool_data_path so it can be restored before returning.
     original_tool_data_path = self.app.config.tool_data_path
     repository = suc.get_repository_in_tool_shed( self.app, repository_id )
     repo_files_dir = repository.repo_path( self.app )
     repo = hg_util.get_repo_for_repository( self.app, repository=None, repo_path=repo_files_dir, create=False )
     message = ''
     tool = None
     can_use_disk_file = False
     tool_config_filepath = suc.get_absolute_path_to_file_in_repository( repo_files_dir, tool_config_filename )
     work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-ltfcr" )
     can_use_disk_file = self.can_use_tool_config_disk_file( repository,
                                                             repo,
                                                             tool_config_filepath,
                                                             changeset_revision )
     if can_use_disk_file:
         # The on-disk config is valid for this revision, so load directly from disk.
         self.app.config.tool_data_path = work_dir
         tool, valid, message, sample_files = \
             self.handle_sample_files_and_load_tool_from_disk( repo_files_dir,
                                                               repository_id,
                                                               tool_config_filepath,
                                                               work_dir )
         if tool is not None:
             invalid_files_and_errors_tups = \
                 self.check_tool_input_params( repo_files_dir,
                                               tool_config_filename,
                                               tool,
                                               sample_files )
             if invalid_files_and_errors_tups:
                 # Append a description of any invalid tool input params to the message.
                 message2 = tool_util.generate_message_for_invalid_tools( self.app,
                                                                          invalid_files_and_errors_tups,
                                                                          repository,
                                                                          metadata_dict=None,
                                                                          as_html=True,
                                                                          displaying_invalid_tool=True )
                 message = self.concat_messages( message, message2 )
     else:
         # Otherwise load the config from the repository manifest via a tmp copy.
         tool, message, sample_files = \
             self.handle_sample_files_and_load_tool_from_tmp_config( repo,
                                                                     repository_id,
                                                                     changeset_revision,
                                                                     tool_config_filename,
                                                                     work_dir )
     basic_util.remove_dir( work_dir )
     self.app.config.tool_data_path = original_tool_data_path
     # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
     self.tdtm.reset_tool_data_tables()
     return repository, tool, message
Ejemplo n.º 13
0
def has_previous_repository_reviews( app, repository, changeset_revision ):
    """
    Determine if a repository has a changeset revision review prior to the
    received changeset revision.
    """
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    reviewed_revision_hashes = set( review.changeset_revision for review in repository.reviews )
    # Walk the changelog backwards from changeset_revision; stop at the first
    # revision that has been reviewed.
    return any( str( repo.changectx( changeset ) ) in reviewed_revision_hashes
                for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ) )
 def get_value(self, trans, grid, repository):
     """Render HTML links to each reviewed revision of repository, or '' if none."""
     # Restrict to revisions that have been reviewed.
     if not repository.reviews:
         return ''
     repo = hg_util.get_repo_for_repository(trans.app, repository=repository)
     encoded_id = trans.security.encode_id(repository.id)
     links = []
     for review in repository.reviews:
         changeset_revision = review.changeset_revision
         rev, label = hg_util.get_rev_label_from_changeset_revision(repo, changeset_revision)
         links.append('<a href="manage_repository_reviews_of_revision?id=%s&changeset_revision=%s">%s</a><br/>' %
                      (encoded_id, changeset_revision, label))
     return ''.join(links)
Ejemplo n.º 15
0
 def get_certified_level_one_tuple( self, repository ):
     """
     Return a ( latest_installable_changeset_revision, certified ) tuple where
     certified is True if the latest installable changeset_revision of the
     received repository is level one certified.
     """
     if repository is None:
         return ( None, False )
     if repository.deleted or repository.deprecated:
         return ( None, False )
     repo = hg_util.get_repo_for_repository( self.app, repository=repository, repo_path=None, create=False )
     # Get the latest installable changeset revision since that is all that is currently configured for testing.
     latest_installable_changeset_revision = suc.get_latest_downloadable_changeset_revision( self.app, repository, repo )
     if latest_installable_changeset_revision in [ None, hg_util.INITIAL_CHANGELOG_HASH ]:
         # BUG FIX: previously fell off the end of the function and implicitly
         # returned None instead of the documented 2-tuple.
         return ( None, False )
     encoded_repository_id = self.app.security.encode_id( repository.id )
     repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app,
                                                                              encoded_repository_id,
                                                                              latest_installable_changeset_revision )
     if not repository_metadata:
         # BUG FIX: this path also previously returned None implicitly.
         return ( latest_installable_changeset_revision, False )
     # Filter out repository revisions that have not been tested.
     if repository_metadata.time_last_tested is None or repository_metadata.tool_test_results is None:
         # No test results.
         return ( latest_installable_changeset_revision, False )
     if repository.type not in [ rt_util.REPOSITORY_SUITE_DEFINITION, rt_util.TOOL_DEPENDENCY_DEFINITION ] \
             and repository_metadata.includes_tools:
         # We have a repository with type Unrestricted that contains tools;
         # certification depends on the tools being functionally correct.
         return ( latest_installable_changeset_revision, bool( repository_metadata.tools_functionally_correct ) )
     # Look in the tool_test_results dictionary for installation errors.  This
     # single check replaces the two identical try/except blocks that previously
     # handled suite/dependency-definition repositories and tool-less
     # Unrestricted repositories separately.
     try:
         tool_test_results_dict = repository_metadata.tool_test_results[ 0 ]
     except Exception as e:
         message = 'Error attempting to retrieve install and test results for repository %s:\n' % str( repository.name )
         message += '%s' % str( e )
         log.exception( message )
         return ( latest_installable_changeset_revision, False )
     if 'installation_errors' in tool_test_results_dict:
         return ( latest_installable_changeset_revision, False )
     return ( latest_installable_changeset_revision, True )
Ejemplo n.º 16
0
 def browse_review( self, trans, **kwd ):
     """Render the browse_review page for the review identified by kwd[ 'id' ]."""
     message = escape( kwd.get( 'message', '' ) )
     status = kwd.get( 'status', 'done' )
     review = review_util.get_review( trans.app, kwd[ 'id' ] )
     repository = review.repository
     repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
     # Translate the review's changeset revision into a display label.
     rev, changeset_revision_label = hg_util.get_rev_label_from_changeset_revision( repo, review.changeset_revision )
     template_context = dict( repository=repository,
                              changeset_revision_label=changeset_revision_label,
                              review=review,
                              message=message,
                              status=status )
     return trans.fill_template( '/webapps/tool_shed/repository_review/browse_review.mako', **template_context )
Ejemplo n.º 17
0
def handle_directory_changes( app, host, username, repository, full_path, filenames_in_archive, remove_repo_files_not_in_tar,
                              new_repo_alert, commit_message, undesirable_dirs_removed, undesirable_files_removed ):
    """
    Reconcile the files on disk under full_path with the files extracted from an
    uploaded archive.  When remove_repo_files_not_in_tar is True and the repository
    already contains files, collect every repository file that is NOT in the archive
    and remove it from the mercurial repository.

    :param full_path: absolute directory into which the archive was extracted
    :param filenames_in_archive: archive member names relative to full_path
    :param undesirable_dirs_removed: running count, incremented for each dir in
        UNDESIRABLE_DIRS pruned from the walk
    :param undesirable_files_removed: running count, incremented for each file in
        UNDESIRABLE_FILES skipped

    NOTE(review): no return statement or commit is visible in this block — the
    function appears truncated here (content_alert_str and files_to_remove are
    built but never used past the removal loop); confirm against the full source.
    """
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    content_alert_str = ''
    files_to_remove = []
    # Normalize archive member names to absolute paths for comparison with os.walk results.
    filenames_in_archive = [ os.path.join( full_path, name ) for name in filenames_in_archive ]
    if remove_repo_files_not_in_tar and not repository.is_new( app ):
        # We have a repository that is not new (it contains files), so discover those files that are in the
        # repository, but not in the uploaded archive.
        for root, dirs, files in os.walk( full_path ):
            # Skip mercurial metadata directories/files entirely.
            if root.find( '.hg' ) < 0 and root.find( 'hgrc' ) < 0:
                for undesirable_dir in UNDESIRABLE_DIRS:
                    if undesirable_dir in dirs:
                        # Mutating dirs in place prunes the walk below this directory.
                        dirs.remove( undesirable_dir )
                        undesirable_dirs_removed += 1
                for undesirable_file in UNDESIRABLE_FILES:
                    if undesirable_file in files:
                        files.remove( undesirable_file )
                        undesirable_files_removed += 1
                for name in files:
                    full_name = os.path.join( root, name )
                    if full_name not in filenames_in_archive:
                        files_to_remove.append( full_name )
        for repo_file in files_to_remove:
            # Remove files in the repository (relative to the upload point) that are not in
            # the uploaded archive.
            try:
                hg_util.remove_file( repo.ui, repo, repo_file, force=True )
            except Exception, e:
                # Fall back to manipulating the dirstate and filesystem directly
                # when the mercurial API removal fails.
                log.debug( "Error removing files using the mercurial API, so trying a different approach, the error was: %s" % str( e ))
                # Derive the path relative to the repository root (after 'repo_<id>').
                relative_selected_file = repo_file.split( 'repo_%d' % repository.id )[1].lstrip( '/' )
                repo.dirstate.remove( relative_selected_file )
                repo.dirstate.write()
                absolute_selected_file = os.path.abspath( repo_file )
                if os.path.isdir( absolute_selected_file ):
                    try:
                        os.rmdir( absolute_selected_file )
                    except OSError, e:
                        # The directory is not empty.
                        pass
                elif os.path.isfile( absolute_selected_file ):
                    os.remove( absolute_selected_file )
                    dir = os.path.split( absolute_selected_file )[0]
                    try:
                        # Opportunistically remove the parent directory if it is now empty.
                        os.rmdir( dir )
                    except OSError, e:
                        # The directory is not empty.
                        pass
def should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ):
    """
    The received testable_revision is True if the tool has defined tests and test files are in the repository
    This method returns True if the received repository has multiple downloadable revisions and the received
    changeset_revision is not the most recent downloadable revision and the received testable_revision is False.
    In this case, the received changeset_revision will never be updated with correct data, and re-testing it
    would be redundant.
    """
    # Testable revisions are never flagged.
    if testable_revision:
        return False
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    ordered_revisions = suc.get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
    if len( ordered_revisions ) < 2:
        return False
    # Flag only revisions that are not the most recent downloadable revision.
    return changeset_revision != ordered_revisions[ -1 ]
Ejemplo n.º 19
0
 def manage_repository_reviews( self, trans, mine=False, **kwd ):
     """
     Render the reviews_of_repository template: for each changeset revision of
     the repository that has metadata or has been reviewed, collect its label,
     its reviews, and whether the current user may add a review.
     """
     # The value of the received id is the encoded repository id.
     message = escape( kwd.get( 'message', '' ) )
     status = kwd.get( 'status', 'done' )
     repository_id = kwd.get( 'id', None )
     # Bind these before the conditional so the fill_template call below cannot
     # raise NameError when no repository id is received (previously they were
     # only assigned inside the if block).
     repository = None
     reviews_dict = odict()
     if repository_id:
         repository = suc.get_repository_in_tool_shed( trans.app, repository_id )
         repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
         metadata_revision_hashes = [ metadata_revision.changeset_revision for metadata_revision in repository.metadata_revisions ]
         reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
         for changeset in hg_util.get_reversed_changelog_changesets( repo ):
             ctx = repo.changectx( changeset )
             changeset_revision = str( ctx )
             # Only revisions with metadata or with existing reviews are listed.
             if changeset_revision in metadata_revision_hashes or changeset_revision in reviewed_revision_hashes:
                 rev, changeset_revision_label = hg_util.get_rev_label_from_changeset_revision( repo, changeset_revision )
                 if changeset_revision in reviewed_revision_hashes:
                     # Find the review for this changeset_revision
                     repository_reviews = \
                         review_util.get_reviews_by_repository_id_changeset_revision( trans.app,
                                                                                      repository_id,
                                                                                      changeset_revision )
                     # Determine if the current user can add a review to this revision.
                     can_add_review = trans.user not in [ repository_review.user for repository_review in repository_reviews ]
                     repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans.app, repository_id, changeset_revision )
                     if repository_metadata:
                         repository_metadata_reviews = util.listify( repository_metadata.reviews )
                     else:
                         repository_metadata_reviews = []
                 else:
                     # Not yet reviewed, so anyone (including the current user) may review it.
                     repository_reviews = []
                     repository_metadata_reviews = []
                     can_add_review = True
                 installable = changeset_revision in metadata_revision_hashes
                 revision_dict = dict( changeset_revision_label=changeset_revision_label,
                                       repository_reviews=repository_reviews,
                                       repository_metadata_reviews=repository_metadata_reviews,
                                       installable=installable,
                                       can_add_review=can_add_review )
                 reviews_dict[ changeset_revision ] = revision_dict
     return trans.fill_template( '/webapps/tool_shed/repository_review/reviews_of_repository.mako',
                                 repository=repository,
                                 reviews_dict=reviews_dict,
                                 mine=mine,
                                 message=message,
                                 status=status )
Ejemplo n.º 20
0
 def select_previous_review( self, trans, **kwd ):
     """Render the select_previous_review template for the received repository and changeset revision."""
     # The value of the received id is the encoded repository id.
     status = kwd.get( 'status', 'done' )
     message = escape( kwd.get( 'message', '' ) )
     repository = suc.get_repository_in_tool_shed( trans.app, kwd[ 'id' ] )
     changeset_revision = kwd.get( 'changeset_revision', None )
     repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
     rev, changeset_revision_label = hg_util.get_rev_label_from_changeset_revision( repo, changeset_revision )
     # Reviews of revisions up to and including changeset_revision.
     previous_reviews_dict = review_util.get_previous_repository_reviews( trans.app,
                                                                          repository,
                                                                          changeset_revision )
     return trans.fill_template( '/webapps/tool_shed/repository_review/select_previous_review.mako',
                                 repository=repository,
                                 changeset_revision=changeset_revision,
                                 changeset_revision_label=changeset_revision_label,
                                 previous_reviews_dict=previous_reviews_dict,
                                 message=message,
                                 status=status )
Ejemplo n.º 21
0
def get_current_repository_metadata_for_changeset_revision(app, repository, changeset_revision):
    """
    Return the repository_metadata record for the received changeset_revision,
    also checking the revision it may have been "moved ahead" to in the
    repository changelog; return None when no record can be found.
    """
    encoded_id = app.security.encode_id(repository.id)
    metadata = get_repository_metadata_by_changeset_revision(app, encoded_id, changeset_revision)
    if metadata:
        return metadata
    # The installable changeset_revision may have been changed because it was
    # "moved ahead" in the repository changelog.
    repo = hg_util.get_repo_for_repository(app, repository=repository, repo_path=None, create=False)
    moved_to = get_next_downloadable_changeset_revision(
        repository, repo, after_changeset_revision=changeset_revision)
    if moved_to and moved_to != changeset_revision:
        metadata = get_repository_metadata_by_changeset_revision(app, encoded_id, moved_to)
        if metadata:
            return metadata
    return None
Ejemplo n.º 22
0
 def get_version_lineage_for_tool(self, repository_id, repository_metadata, guid):
     """
     Return the tool version lineage chain in descendant order for the
     received guid contained in repository_metadata.tool_versions.  This
     function is called only from the Tool Shed.
     """
     repository = suc.get_repository_by_id(self.app, repository_id)
     repo = hg_util.get_repo_for_repository(self.app, repository=repository,
                                            repo_path=None, create=False)
     version_lineage = [guid]
     # Walk the changelog backwards from the metadata revision, appending each
     # successive parent guid (the ancestors of the received guid).
     child_guid = guid
     for changeset in hg_util.reversed_upper_bounded_changelog(
             repo, repository_metadata.changeset_revision):
         metadata = suc.get_repository_metadata_by_changeset_revision(
             self.app, repository_id, str(repo.changectx(changeset)))
         if metadata:
             parent = metadata.tool_versions.get(child_guid, None)
             if parent:
                 version_lineage.append(parent)
                 child_guid = parent
     # Walk from the metadata revision toward the repository tip, prepending
     # each child guid whose recorded parent is the current head of the chain.
     parent_guid = guid
     for changeset in hg_util.reversed_lower_upper_bounded_changelog(
             repo, repository_metadata.changeset_revision,
             repository.tip(self.app)):
         metadata = suc.get_repository_metadata_by_changeset_revision(
             self.app, repository_id, str(repo.changectx(changeset)))
         if metadata:
             for child, recorded_parent in metadata.tool_versions.items():
                 if recorded_parent == parent_guid:
                     version_lineage.insert(0, child)
                     parent_guid = child
                     break
     return version_lineage
Ejemplo n.º 23
0
def get_current_repository_metadata_for_changeset_revision(app, repository, changeset_revision):
    """
    Return the repository_metadata record for changeset_revision if one exists;
    otherwise look for the revision the changeset may have been "moved ahead"
    to in the changelog.  Return None when nothing is found.
    """
    encoded_repository_id = app.security.encode_id(repository.id)
    metadata = get_repository_metadata_by_changeset_revision(app,
                                                             encoded_repository_id,
                                                             changeset_revision)
    if not metadata:
        # The installable changeset_revision may have been changed because it
        # was "moved ahead" in the repository changelog.
        repo = hg_util.get_repo_for_repository(app, repository=repository, repo_path=None, create=False)
        updated_revision = get_next_downloadable_changeset_revision(repository,
                                                                    repo,
                                                                    after_changeset_revision=changeset_revision)
        if updated_revision and updated_revision != changeset_revision:
            metadata = get_repository_metadata_by_changeset_revision(app,
                                                                     encoded_repository_id,
                                                                     updated_revision)
    return metadata if metadata else None
Ejemplo n.º 24
0
 def set_allow_push(self, app, usernames, remove_auth=''):
     """
     Rewrite the allow_push entry in this repository's hgrc file.

     When remove_auth is given, that username is removed from the allow_push
     list; otherwise each of the received usernames is appended if not already
     present.

     Fixes over the previous version: the read handle returned by repo.opener
     was never closed (leak); remove() raised ValueError when remove_auth was
     not in the list; the write handle was not closed if a write failed.
     """
     allow_push = util.listify(self.allow_push(app))
     if remove_auth:
         # Guard: remove() raises ValueError when the value is absent.
         if remove_auth in allow_push:
             allow_push.remove(remove_auth)
     else:
         for username in util.listify(usernames):
             if username not in allow_push:
                 allow_push.append(username)
     allow_push = '%s\n' % ','.join(allow_push)
     repo = hg_util.get_repo_for_repository(app, repository=self)
     # Why doesn't the following work?
     # repo.ui.setconfig( 'web', 'allow_push', allow_push )
     reader = repo.opener('hgrc', 'rb')
     try:
         lines = reader.readlines()
     finally:
         reader.close()
     fp = repo.opener('hgrc', 'wb')
     try:
         for line in lines:
             if line.startswith('allow_push'):
                 fp.write('allow_push = %s' % allow_push)
             else:
                 fp.write(line)
     finally:
         fp.close()
Ejemplo n.º 25
0
 def set_allow_push(self, app, usernames, remove_auth=''):
     """
     Rewrite the allow_push entry in this repository's hgrc file.

     When remove_auth is given, that username is removed from the allow_push
     list; otherwise each of the received usernames is appended if not already
     present.

     Fixes over the previous version: the read handle returned by repo.opener
     was never closed (leak); remove() raised ValueError when remove_auth was
     not in the list; the write handle was not closed if a write failed.
     """
     allow_push = util.listify(self.allow_push(app))
     if remove_auth:
         # Guard: remove() raises ValueError when the value is absent.
         if remove_auth in allow_push:
             allow_push.remove(remove_auth)
     else:
         for username in util.listify(usernames):
             if username not in allow_push:
                 allow_push.append(username)
     allow_push = '%s\n' % ','.join(allow_push)
     repo = hg_util.get_repo_for_repository(app, repository=self)
     # Why doesn't the following work?
     # repo.ui.setconfig( 'web', 'allow_push', allow_push )
     reader = repo.opener('hgrc', 'rb')
     try:
         lines = reader.readlines()
     finally:
         reader.close()
     fp = repo.opener('hgrc', 'wb')
     try:
         for line in lines:
             if line.startswith('allow_push'):
                 fp.write('allow_push = %s' % allow_push)
             else:
                 fp.write(line)
     finally:
         fp.close()
Ejemplo n.º 26
0
def get_updated_changeset_revisions( app, name, owner, changeset_revision ):
    """
    Return a string of comma-separated changeset revision hashes for all available updates to the received changeset
    revision for the repository defined by the received name and owner.
    """
    repository = tool_shed.util.repository_util.get_repository_by_name_and_owner( app, name, owner )
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    # The upper bound changeset revision is excluded from the result.
    upper_bound_changeset_revision = get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
    changeset_hashes = [ str( repo.changectx( changeset ) )
                         for changeset in hg_util.reversed_lower_upper_bounded_changelog( repo, changeset_revision, upper_bound_changeset_revision )
                         if changeset != upper_bound_changeset_revision ]
    return ','.join( changeset_hashes ) if changeset_hashes else ''
Ejemplo n.º 27
0
def get_updated_changeset_revisions(app, name, owner, changeset_revision):
    """
    Return a string of comma-separated changeset revision hashes for all available updates to the received changeset
    revision for the repository defined by the received name and owner.
    """
    repository = tool_shed.util.repository_util.get_repository_by_name_and_owner(app, name, owner)
    repo = hg_util.get_repo_for_repository(app, repository=repository, repo_path=None, create=False)
    # Determine the upper bound; it is excluded from the returned hashes.
    upper_bound = get_next_downloadable_changeset_revision(repository, repo, changeset_revision)
    hashes = []
    for changeset in hg_util.reversed_lower_upper_bounded_changelog(repo, changeset_revision, upper_bound):
        if changeset == upper_bound:
            # Make sure to exclude the upper bound changeset revision.
            continue
        hashes.append(str(repo.changectx(changeset)))
    if not hashes:
        return ''
    return ','.join(hashes)
Ejemplo n.º 28
0
def get_latest_repository_metadata(trans, repository):
    """
    Return the latest repository_metadata record for the received repository if
    it exists.  This will return repositories of type unrestricted as well as
    types repository_suite_definition and tool_dependency_definition.
    """
    encoded_id = trans.security.encode_id(repository.id)
    repo = hg_util.get_repo_for_repository(trans.app, repository=repository)
    tip_hash = str(repo[repo.changelog.tip()])
    try:
        return metadata_util.get_repository_metadata_by_changeset_revision(trans.app, encoded_id, tip_hash)
    except Exception:
        # No metadata at the tip; fall back to the latest prior revision with metadata.
        fallback_revision = metadata_util.get_previous_metadata_changeset_revision(trans.app, repository, tip_hash, downloadable=False)
        if fallback_revision == hg_util.INITIAL_CHANGELOG_HASH:
            return None
        return metadata_util.get_repository_metadata_by_changeset_revision(trans.app,
                                                                           encoded_id,
                                                                           fallback_revision)
 def is_valid_for_type(self, app, repository, revisions_to_check=None):
     """
     Inspect the received repository's contents to determine if they abide by the rules defined for the contents of this type.
     If the received revisions_to_check is a list of changeset revisions, then inspection will be restricted to the revisions
     in the list.
     """
     repo = hg_util.get_repo_for_repository(app, repository=repository)
     # Restrict the inspection when specific revisions were received.
     changesets = revisions_to_check if revisions_to_check else repo.changelog
     for changeset in changesets:
         ctx = repo.changectx(changeset)
         # Every file touched by the changeset must have an allowed name.
         for changed_path in ctx.files():
             if basic_util.strip_path(changed_path) not in self.valid_file_names:
                 return False
     return True
 def is_valid_for_type(self, app, repository, revisions_to_check=None):
     """
     Inspect the received repository's contents to determine if they abide by the rules defined for the contents of this type.
     If the received revisions_to_check is a list of changeset revisions, then inspection will be restricted to the revisions
     in the list.
     """
     repo = hg_util.get_repo_for_repository(app, repository=repository)
     if revisions_to_check:
         revisions = revisions_to_check
     else:
         revisions = repo.changelog
     for revision in revisions:
         changed_paths = repo.changectx(revision).files()
         # The revision is invalid if any touched file has a disallowed name.
         if any(basic_util.strip_path(path) not in self.valid_file_names
                for path in changed_paths):
             return False
     return True
Ejemplo n.º 31
0
def get_previous_repository_reviews( app, repository, changeset_revision ):
    """
    Return an ordered dictionary of repository reviews up to and including the
    received changeset revision.
    """
    repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
    encoded_repository_id = app.security.encode_id( repository.id )
    reviewed_hashes = set( review.changeset_revision for review in repository.reviews )
    previous_reviews_dict = odict()
    for changeset in hg_util.reversed_upper_bounded_changelog( repo, changeset_revision ):
        prior_hash = str( repo.changectx( changeset ) )
        if prior_hash not in reviewed_hashes:
            continue
        prior_rev, prior_label = hg_util.get_rev_label_from_changeset_revision( repo, prior_hash )
        revision_reviews = get_reviews_by_repository_id_changeset_revision( app,
                                                                            encoded_repository_id,
                                                                            prior_hash )
        previous_reviews_dict[ prior_hash ] = dict( changeset_revision_label=prior_label,
                                                    reviews=revision_reviews )
    return previous_reviews_dict
Ejemplo n.º 32
0
 def select_previous_review(self, trans, **kwd):
     """Render the select_previous_review template for the received repository and changeset revision."""
     # The value of the received id is the encoded repository id.
     status = kwd.get('status', 'done')
     message = escape(kwd.get('message', ''))
     repository = repository_util.get_repository_in_tool_shed(
         trans.app, kwd['id'])
     changeset_revision = kwd.get('changeset_revision', None)
     repo = hg_util.get_repo_for_repository(trans.app,
                                            repository=repository)
     rev, changeset_revision_label = hg_util.get_rev_label_from_changeset_revision(
         repo, changeset_revision)
     # Reviews of revisions up to and including changeset_revision.
     previous_reviews_dict = review_util.get_previous_repository_reviews(
         trans.app, repository, changeset_revision)
     return trans.fill_template(
         '/webapps/tool_shed/repository_review/select_previous_review.mako',
         repository=repository,
         changeset_revision=changeset_revision,
         changeset_revision_label=changeset_revision_label,
         previous_reviews_dict=previous_reviews_dict,
         message=message,
         status=status)
Ejemplo n.º 33
0
def should_set_do_not_test_flag(app, repository, changeset_revision,
                                testable_revision):
    """
    The received testable_revision is True if the tool has defined tests and test files are in the repository
    This method returns True if the received repository has multiple downloadable revisions and the received
    changeset_revision is not the most recent downloadable revision and the received testable_revision is False.
    In this case, the received changeset_revision will never be updated with correct data, and re-testing it
    would be redundant.
    """
    # Testable revisions are never flagged.
    if testable_revision:
        return False
    repo = hg_util.get_repo_for_repository(app,
                                           repository=repository,
                                           repo_path=None,
                                           create=False)
    revisions = suc.get_ordered_metadata_changeset_revisions(
        repository, repo, downloadable=True)
    # Flag only when there are multiple downloadable revisions and this is not the latest.
    return len(revisions) > 1 and changeset_revision != revisions[-1]
Ejemplo n.º 34
0
 def manage_repository_reviews_of_revision( self, trans, **kwd ):
     """Render the reviews_of_changeset_revision template for the received repository revision."""
     # The value of the received id is the encoded repository id.
     status = kwd.get( 'status', 'done' )
     message = escape( kwd.get( 'message', '' ) )
     repository_id = kwd.get( 'id', None )
     changeset_revision = kwd.get( 'changeset_revision', None )
     repository = suc.get_repository_in_tool_shed( trans.app, repository_id )
     repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
     # The revision is installable when it has an associated metadata revision.
     metadata_hashes = [ metadata_revision.changeset_revision for metadata_revision in repository.metadata_revisions ]
     installable = changeset_revision in metadata_hashes
     rev, changeset_revision_label = hg_util.get_rev_label_from_changeset_revision( repo, changeset_revision )
     reviews = review_util.get_reviews_by_repository_id_changeset_revision( trans.app,
                                                                            repository_id,
                                                                            changeset_revision )
     return trans.fill_template( '/webapps/tool_shed/repository_review/reviews_of_changeset_revision.mako',
                                 repository=repository,
                                 changeset_revision=changeset_revision,
                                 changeset_revision_label=changeset_revision_label,
                                 reviews=reviews,
                                 installable=installable,
                                 message=message,
                                 status=status )
Ejemplo n.º 35
0
def get_next_downloadable_changeset_revision(app, repository, after_changeset_revision):
    """
    Return the installable changeset_revision in the repository changelog after the changeset to which
    after_changeset_revision refers.  If there isn't one, return None. If there is only one installable
    changeset, and that matches the requested revision, return it.
    """
    downloadable = [revision[1] for revision in get_metadata_revisions(app, repository)]
    if len(downloadable) == 1 and downloadable[0] == after_changeset_revision:
        return after_changeset_revision
    repo = hg_util.get_repo_for_repository(app, repository=repository)
    passed_after = False
    # Scan the changelog in order; the first downloadable hash seen after
    # after_changeset_revision is the answer.
    for changeset in repo.changelog:
        current_hash = str(repo.changectx(changeset))
        if passed_after and current_hash in downloadable:
            return current_hash
        if current_hash == after_changeset_revision:
            # Found the changeset for which we need the next downloadable one.
            passed_after = True
    return None
Ejemplo n.º 36
0
 def get_certified_level_one_tuple( self, repository ):
     """
     Return a 2-tuple ( changeset_revision, certified ) where certified is True
     if the latest installable changeset_revision of the received repository is
     level one certified.

     Fix: the previous version implicitly returned None (not a tuple) when a
     repository_metadata record did not exist for the latest installable
     revision, and its '# No repository_metadata.' comment was attached to the
     branch where the record DOES exist.
     """
     if repository is None:
         return ( None, False )
     if repository.deleted or repository.deprecated:
         return ( None, False )
     repo = hg_util.get_repo_for_repository( self.app, repository=repository, repo_path=None, create=False )
     # Get the latest installable changeset revision since that is all that is currently configured for testing.
     latest_installable_changeset_revision = metadata_util.get_latest_downloadable_changeset_revision( self.app, repository, repo )
     if latest_installable_changeset_revision not in [ None, hg_util.INITIAL_CHANGELOG_HASH ]:
         encoded_repository_id = self.app.security.encode_id( repository.id )
         repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision( self.app,
                                                                                            encoded_repository_id,
                                                                                            latest_installable_changeset_revision )
         if repository_metadata:
             return ( latest_installable_changeset_revision, True )
         # No repository_metadata record exists for the revision, so it cannot be certified.
         return ( latest_installable_changeset_revision, False )
     else:
         # No installable changeset_revision.
         return ( None, False )
Ejemplo n.º 37
0
 def get_certified_level_one_tuple(self, repository):
     """
     Return a 2-tuple (changeset_revision, certified) where certified is True
     if the latest installable changeset_revision of the received repository is
     level one certified.

     Fix: the previous version implicitly returned None (not a tuple) when a
     repository_metadata record did not exist for the latest installable
     revision, and its '# No repository_metadata.' comment was attached to the
     branch where the record DOES exist.
     """
     if repository is None:
         return (None, False)
     if repository.deleted or repository.deprecated:
         return (None, False)
     repo = hg_util.get_repo_for_repository(self.app, repository=repository, repo_path=None, create=False)
     # Get the latest installable changeset revision since that is all that is currently configured for testing.
     latest_installable_changeset_revision = metadata_util.get_latest_downloadable_changeset_revision(self.app, repository, repo)
     if latest_installable_changeset_revision not in [None, hg_util.INITIAL_CHANGELOG_HASH]:
         encoded_repository_id = self.app.security.encode_id(repository.id)
         repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(self.app,
                                                                                           encoded_repository_id,
                                                                                           latest_installable_changeset_revision)
         if repository_metadata:
             return (latest_installable_changeset_revision, True)
         # No repository_metadata record exists for the revision, so it cannot be certified.
         return (latest_installable_changeset_revision, False)
     else:
         # No installable changeset_revision.
         return (None, False)
Ejemplo n.º 38
0
    def create_changeset_revision(self, trans, id, payload, **kwd):
        """
        POST /api/repositories/{encoded_repository_id}/changeset_revision

        Create a new tool shed repository commit - leaving PUT on parent
        resource open for updating meta-attributes of the repository (and
        Galaxy doesn't allow PUT multipart data anyway
        https://trello.com/c/CQwmCeG6).

        :param id: the encoded id of the Repository object

        The following parameters may be included in the payload.
        :param commit_message: hg commit message for update.
        """

        # Example URL: http://localhost:9009/api/repositories/f9cad7b01a472135
        # Handlers that repair missing toolshed / changeset_revision attributes
        # in repository/tool dependency definition files found in the upload.
        rdah = attribute_handlers.RepositoryDependencyAttributeHandler(
            trans.app, unpopulate=False)
        tdah = attribute_handlers.ToolDependencyAttributeHandler(
            trans.app, unpopulate=False)

        repository = suc.get_repository_in_tool_shed(trans.app, id)
        repo_dir = repository.repo_path(trans.app)
        repo = hg_util.get_repo_for_repository(trans.app,
                                               repository=None,
                                               repo_path=repo_dir,
                                               create=False)

        upload_point = commit_util.get_upload_point(repository, **kwd)
        # Remember the current tip so we can detect below whether the upload
        # actually introduced any changes.
        tip = repository.tip(trans.app)

        file_data = payload.get('file')
        # Code stolen from gx's upload_common.py
        if isinstance(file_data, FieldStorage):
            assert not isinstance(file_data.file, StringIO.StringIO)
            assert file_data.file.name != '<fdopen>'
            # Hard-link the uploaded temp file to a stable name so its data
            # survives the FieldStorage file object being closed below.
            local_filename = util.mkstemp_ln(file_data.file.name,
                                             'upload_file_data_')
            file_data.file.close()
            file_data = dict(filename=file_data.filename,
                             local_filename=local_filename)
        elif type(file_data) == dict and 'local_filename' not in file_data:
            raise Exception(
                'Uploaded file was encoded in a way not understood.')

        commit_message = kwd.get('commit_message', 'Uploaded')

        uploaded_file = open(file_data['local_filename'], 'rb')
        uploaded_file_name = file_data['local_filename']

        # Detect compression so the archive can be opened transparently.
        isgzip = False
        isbz2 = False
        isgzip = checkers.is_gzip(uploaded_file_name)
        if not isgzip:
            isbz2 = checkers.is_bz2(uploaded_file_name)
        if (isgzip or isbz2):
            # Open for reading with transparent compression.
            tar = tarfile.open(uploaded_file_name, 'r:*')
        else:
            tar = tarfile.open(uploaded_file_name)

        new_repo_alert = False
        remove_repo_files_not_in_tar = True

        # upload_tar extracts the archive into the repository, repairs
        # dependency definition files via rdah/tdah, and commits; it also
        # closes tar and uploaded_file.
        ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \
            repository_content_util.upload_tar(
                trans,
                rdah,
                tdah,
                repository,
                tar,
                uploaded_file,
                upload_point,
                remove_repo_files_not_in_tar,
                commit_message,
                new_repo_alert
            )
        if ok:
            # Update the repository files for browsing.
            hg_util.update_repository(repo)
            # Get the new repository tip.
            if tip == repository.tip(trans.app):
                # Tip unchanged: the archive contained nothing new.
                trans.response.status = 400
                message = 'No changes to repository.'
                ok = False
            else:
                rmm = repository_metadata_manager.RepositoryMetadataManager(
                    app=trans.app, user=trans.user, repository=repository)
                status, error_message = \
                    rmm.set_repository_metadata_due_to_new_tip( trans.request.host,
                                                                content_alert_str=content_alert_str,
                                                                **kwd )
                if error_message:
                    ok = False
                    trans.response.status = 500
                    message = error_message
        else:
            trans.response.status = 500

        if not ok:
            return {"err_msg": message, "content_alert": content_alert_str}
        else:
            return {"message": message, "content_alert": content_alert_str}
Ejemplo n.º 39
0
 def upload_directory(self, trans, rdah, tdah, repository,
                      uploaded_directory, upload_point,
                      remove_repo_files_not_in_tar, commit_message,
                      new_repo_alert):
     """
     Move the files found under uploaded_directory into the repository's
     working directory and commit the changes.

     Repository and tool dependency definition files are passed through the
     rdah / tdah attribute handlers so missing toolshed or
     changeset_revision attributes are populated before the commit.

     Returns the 6-tuple produced by commit_util.handle_directory_changes:
     ( ok, message, files_to_remove, content_alert_str,
       undesirable_dirs_removed, undesirable_files_removed ).
     """
     repo_dir = repository.repo_path(trans.app)
     # Ensure the hg repository exists at repo_dir; the returned repo object
     # is not needed here (matches the sibling upload_tar helper).
     hg_util.get_repo_for_repository(trans.app,
                                     repository=None,
                                     repo_path=repo_dir,
                                     create=False)
     undesirable_dirs_removed = 0
     undesirable_files_removed = 0
     if upload_point is not None:
         full_path = os.path.abspath(os.path.join(repo_dir, upload_point))
     else:
         full_path = os.path.abspath(repo_dir)
     filenames_in_archive = []
     for root, dirs, files in os.walk(uploaded_directory):
         for uploaded_file in files:
             relative_path = os.path.normpath(
                 os.path.join(os.path.relpath(root, uploaded_directory),
                              uploaded_file))
             # Repository-suite and tool-dependency-definition repositories
             # may contain only their single definition file; all other
             # repository types are screened against the blacklist of
             # undesirable file names.
             if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION:
                 ok = os.path.basename(
                     uploaded_file
                 ) == rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME
             elif repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION:
                 ok = os.path.basename(
                     uploaded_file
                 ) == rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME
             else:
                 ok = os.path.basename(
                     uploaded_file) not in commit_util.UNDESIRABLE_FILES
             if ok:
                 # Also reject files living under an undesirable directory
                 # anywhere along their relative path.
                 for file_path_item in relative_path.split('/'):
                     if file_path_item in commit_util.UNDESIRABLE_DIRS:
                         undesirable_dirs_removed += 1
                         ok = False
                         break
             else:
                 undesirable_files_removed += 1
             if ok:
                 uploaded_file_name = os.path.abspath(
                     os.path.join(root, uploaded_file))
                 if os.path.split(
                         uploaded_file_name
                 )[-1] == rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
                     # Inspect the contents of the file to see if toolshed or changeset_revision
                     # attributes are missing and if so, set them appropriately.
                     altered, root_elem, error_message = rdah.handle_tag_attributes(
                         uploaded_file_name)
                     if error_message:
                         # Return int counters in positions 5-6 so the error
                         # tuple has the same shape as the success tuple.
                         return False, error_message, [], '', undesirable_dirs_removed, undesirable_files_removed
                     elif altered:
                         tmp_filename = xml_util.create_and_write_tmp_file(
                             root_elem)
                         shutil.move(tmp_filename, uploaded_file_name)
                 elif os.path.split(uploaded_file_name)[
                         -1] == rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME:
                     # Inspect the contents of the file to see if toolshed or changeset_revision
                     # attributes are missing and if so, set them appropriately.
                     altered, root_elem, error_message = tdah.handle_tag_attributes(
                         uploaded_file_name)
                     if error_message:
                         return False, error_message, [], '', undesirable_dirs_removed, undesirable_files_removed
                     if altered:
                         tmp_filename = xml_util.create_and_write_tmp_file(
                             root_elem)
                         shutil.move(tmp_filename, uploaded_file_name)
                 # Move the file into place, replacing any existing file or
                 # directory already at the destination path.
                 repo_path = os.path.join(full_path, relative_path)
                 repo_basedir = os.path.normpath(
                     os.path.join(repo_path, os.path.pardir))
                 if not os.path.exists(repo_basedir):
                     os.makedirs(repo_basedir)
                 if os.path.exists(repo_path):
                     if os.path.isdir(repo_path):
                         shutil.rmtree(repo_path)
                     else:
                         os.remove(repo_path)
                 shutil.move(
                     os.path.join(uploaded_directory, relative_path),
                     repo_path)
                 filenames_in_archive.append(relative_path)
     return commit_util.handle_directory_changes(
         trans.app, trans.request.host, trans.user.username, repository,
         full_path, filenames_in_archive, remove_repo_files_not_in_tar,
         new_repo_alert, commit_message, undesirable_dirs_removed,
         undesirable_files_removed)
Ejemplo n.º 40
0
 def get_updated_changeset_revisions_for_repository_dependencies(
         self, key_rd_dicts):
     # For each received repository dependency, check whether its changeset
     # revision is still installable; if not, try to advance it to the next
     # downloadable changeset revision.  Dependencies that cannot be
     # resolved are logged and dropped from the returned list.
     updated_key_rd_dicts = []
     for key_rd_dict in key_rd_dicts:
         # NOTE(review): dict.keys()[0] is Python 2 only and assumes each
         # key_rd_dict holds exactly one entry -- confirm with callers.
         key = key_rd_dict.keys()[0]
         repository_dependency = key_rd_dict[key]
         rd_toolshed, \
         rd_name, \
         rd_owner, \
         rd_changeset_revision, \
         rd_prior_installation_required, \
         rd_only_if_compiling_contained_td = \
             common_util.parse_repository_dependency_tuple( repository_dependency )
         if suc.tool_shed_is_this_tool_shed(rd_toolshed):
             repository = suc.get_repository_by_name_and_owner(
                 self.app, rd_name, rd_owner)
             if repository:
                 repository_id = self.app.security.encode_id(repository.id)
                 repository_metadata = \
                     metadata_util.get_repository_metadata_by_repository_id_changeset_revision( self.app,
                                                                                                repository_id,
                                                                                                rd_changeset_revision )
                 if repository_metadata:
                     # The repository changeset_revision is installable, so no updates are available.
                     # NOTE(review): new_key_rd_dict is built here but the
                     # original key_rd_dict is appended instead, so the two
                     # assignments below are effectively dead code.
                     new_key_rd_dict = {}
                     new_key_rd_dict[key] = repository_dependency
                     updated_key_rd_dicts.append(key_rd_dict)
                 else:
                     # The repository changeset_revision is no longer installable, so see if there's been an update.
                     repo = hg_util.get_repo_for_repository(
                         self.app,
                         repository=repository,
                         repo_path=None,
                         create=False)
                     changeset_revision = suc.get_next_downloadable_changeset_revision(
                         repository, repo, rd_changeset_revision)
                     repository_metadata = \
                         metadata_util.get_repository_metadata_by_repository_id_changeset_revision( self.app,
                                                                                                    repository_id,
                                                                                                    changeset_revision )
                     if repository_metadata:
                         new_key_rd_dict = {}
                         new_key_rd_dict[ key ] = \
                             [ rd_toolshed, \
                               rd_name, \
                               rd_owner, \
                               repository_metadata.changeset_revision, \
                               rd_prior_installation_required, \
                               rd_only_if_compiling_contained_td ]
                         # We have the updated changeset revision.
                         updated_key_rd_dicts.append(new_key_rd_dict)
                     else:
                         repository_components_tuple = container_util.get_components_from_key(
                             key)
                         components_list = suc.extract_components_from_tuple(
                             repository_components_tuple)
                         toolshed, repository_name, repository_owner, repository_changeset_revision = components_list[
                             0:4]
                         # For backward compatibility to the 12/20/12 Galaxy release.
                         # NOTE(review): prior_installation_required is
                         # assigned below but never read afterwards --
                         # verify whether rd_prior_installation_required
                         # was intended.
                         if len(components_list) == 4:
                             prior_installation_required = 'False'
                             rd_only_if_compiling_contained_td = 'False'
                         elif len(components_list) == 5:
                             rd_only_if_compiling_contained_td = 'False'
                         message = "The revision %s defined for repository %s owned by %s is invalid, so repository " % \
                             ( str( rd_changeset_revision ), str( rd_name ), str( rd_owner ) )
                         message += "dependencies defined for repository %s will be ignored." % str(
                             repository_name)
                         log.debug(message)
             else:
                 # The dependency names a repository this tool shed does not
                 # have; log and skip it.
                 repository_components_tuple = container_util.get_components_from_key(
                     key)
                 components_list = suc.extract_components_from_tuple(
                     repository_components_tuple)
                 toolshed, repository_name, repository_owner, repository_changeset_revision = components_list[
                     0:4]
                 message = "The revision %s defined for repository %s owned by %s is invalid, so repository " % \
                     ( str( rd_changeset_revision ), str( rd_name ), str( rd_owner ) )
                 message += "dependencies defined for repository %s will be ignored." % str(
                     repository_name)
                 log.debug(message)
     return updated_key_rd_dicts
Ejemplo n.º 41
0
 def manage_repository_reviews(self, trans, mine=False, **kwd):
     """
     Display the reviews of a repository: one entry per changeset revision
     that either has repository metadata or has already been reviewed.

     :param mine: whether the page is rendered in "my reviews" mode.
     """
     # The value of the received id is the encoded repository id.
     message = escape(kwd.get('message', ''))
     status = kwd.get('status', 'done')
     repository_id = kwd.get('id', None)
     # Bind the template variables up front: without this, a request that
     # omits the id would raise NameError at fill_template below.
     repository = None
     reviews_dict = odict()
     if repository_id:
         repository = suc.get_repository_in_tool_shed(
             trans.app, repository_id)
         repo = hg_util.get_repo_for_repository(trans.app,
                                                repository=repository,
                                                repo_path=None,
                                                create=False)
         metadata_revision_hashes = [
             metadata_revision.changeset_revision
             for metadata_revision in repository.metadata_revisions
         ]
         reviewed_revision_hashes = [
             review.changeset_revision for review in repository.reviews
         ]
         # Walk the changelog newest-first, collecting reviewable revisions.
         for changeset in hg_util.get_reversed_changelog_changesets(repo):
             ctx = repo.changectx(changeset)
             changeset_revision = str(ctx)
             if changeset_revision in metadata_revision_hashes or changeset_revision in reviewed_revision_hashes:
                 rev, changeset_revision_label = hg_util.get_rev_label_from_changeset_revision(
                     repo, changeset_revision)
                 if changeset_revision in reviewed_revision_hashes:
                     # Find the review for this changeset_revision
                     repository_reviews = \
                         review_util.get_reviews_by_repository_id_changeset_revision( trans.app,
                                                                                      repository_id,
                                                                                      changeset_revision )
                     # Determine if the current user can add a review to this revision.
                     can_add_review = trans.user not in [
                         repository_review.user
                         for repository_review in repository_reviews
                     ]
                     repository_metadata = suc.get_repository_metadata_by_changeset_revision(
                         trans.app, repository_id, changeset_revision)
                     if repository_metadata:
                         repository_metadata_reviews = util.listify(
                             repository_metadata.reviews)
                     else:
                         repository_metadata_reviews = []
                 else:
                     # No existing reviews for this revision, so anyone may
                     # add one.
                     repository_reviews = []
                     repository_metadata_reviews = []
                     can_add_review = True
                 installable = changeset_revision in metadata_revision_hashes
                 revision_dict = dict(
                     changeset_revision_label=changeset_revision_label,
                     repository_reviews=repository_reviews,
                     repository_metadata_reviews=repository_metadata_reviews,
                     installable=installable,
                     can_add_review=can_add_review)
                 reviews_dict[changeset_revision] = revision_dict
     return trans.fill_template(
         '/webapps/tool_shed/repository_review/reviews_of_repository.mako',
         repository=repository,
         reviews_dict=reviews_dict,
         mine=mine,
         message=message,
         status=status)
Ejemplo n.º 42
0
def build_readme_files_dict(app,
                            repository,
                            changeset_revision,
                            metadata,
                            tool_path=None):
    """
    Return a dictionary of valid readme file name <-> readme file content pairs for all readme files defined in the received metadata.  Since the
    received changeset_revision (which is associated with the received metadata) may not be the latest installable changeset revision, the README
    file contents may not be available on disk.  This method is used by both Galaxy and the Tool Shed.

    :param tool_path: optional prefix joined onto each readme's relative path
        when reading from disk.
    """
    if app.name == 'galaxy':
        can_use_disk_files = True
    else:
        # In the tool shed, only the latest downloadable revision's files are
        # guaranteed to be on disk; older revisions must be read from the
        # repository manifest instead.
        latest_downloadable_changeset_revision = metadata_util.get_latest_downloadable_changeset_revision(
            app, repository)
        can_use_disk_files = changeset_revision == latest_downloadable_changeset_revision
    readme_files_dict = {}
    if metadata:
        if 'readme_files' in metadata:
            for relative_path_to_readme_file in metadata['readme_files']:
                readme_file_name = os.path.split(
                    relative_path_to_readme_file)[1]
                if can_use_disk_files:
                    if tool_path:
                        full_path_to_readme_file = os.path.abspath(
                            os.path.join(tool_path,
                                         relative_path_to_readme_file))
                    else:
                        full_path_to_readme_file = os.path.abspath(
                            relative_path_to_readme_file)
                    text = None
                    try:
                        # Context manager guarantees the handle is closed
                        # even if read() or unicodify() raises (the original
                        # explicit close() leaked the handle in that case).
                        with open(full_path_to_readme_file, 'r') as f:
                            text = unicodify(f.read())
                    except Exception:
                        log.exception(
                            "Error reading README file '%s' from disk",
                            relative_path_to_readme_file)
                        text = None
                    if text:
                        text_of_reasonable_length = basic_util.size_string(
                            text)
                        if text_of_reasonable_length.find('.. image:: ') >= 0:
                            # Handle image display for README files that are contained in repositories in the tool shed or installed into Galaxy.
                            try:
                                text_of_reasonable_length = suc.set_image_paths(
                                    app,
                                    text_of_reasonable_length,
                                    encoded_repository_id=app.security.
                                    encode_id(repository.id))
                            except Exception:
                                log.exception(
                                    "Exception in build_readme_files_dict, so images may not be properly displayed"
                                )
                        if readme_file_name.endswith('.rst'):
                            # Render reStructuredText through a Mako template
                            # so static/host URLs resolve.
                            text_of_reasonable_length = Template(
                                rst_to_html(text_of_reasonable_length),
                                input_encoding='utf-8',
                                default_filters=['decode.utf8'],
                                encoding_errors='replace')
                            text_of_reasonable_length = text_of_reasonable_length.render(
                                static_path=web.url_for('/static'),
                                host_url=web.url_for('/', qualified=True))
                            text_of_reasonable_length = unicodify(
                                text_of_reasonable_length)
                        else:
                            text_of_reasonable_length = basic_util.to_html_string(
                                text_of_reasonable_length)
                        readme_files_dict[
                            readme_file_name] = text_of_reasonable_length
                else:
                    # We must be in the tool shed and have an old changeset_revision, so we need to retrieve the file contents from the repository manifest.
                    repo = hg_util.get_repo_for_repository(
                        app, repository=repository)
                    ctx = hg_util.get_changectx_for_changeset(
                        repo, changeset_revision)
                    if ctx:
                        fctx = hg_util.get_file_context_from_ctx(
                            ctx, readme_file_name)
                        if fctx and fctx not in ['DELETED']:
                            try:
                                text = unicodify(fctx.data())
                                readme_files_dict[
                                    readme_file_name] = basic_util.size_string(
                                        text)
                            except Exception:
                                log.exception(
                                    "Error reading README file '%s' from repository manifest",
                                    relative_path_to_readme_file)
    return readme_files_dict
Ejemplo n.º 43
0
 def edit_review( self, trans, **kwd ):
     """
     Display and process the form for editing a repository review: save
     per-component comments/ratings, update the revision-approved flag, and
     recompute the repository rating as the average of component ratings.
     """
     # The value of the received id is the encoded review id.
     message = escape( kwd.get( 'message', '' ) )
     status = kwd.get( 'status', 'done' )
     review_id = kwd.get( 'id', None )
     review = review_util.get_review( trans.app, review_id )
     components_dict = odict()
     for component in review_util.get_components( trans.app ):
         components_dict[ component.name ] = dict( component=component, component_review=None )
     repository = review.repository
     repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
     # Attach any existing component reviews to their component entries.
     for component_review in review.component_reviews:
         if component_review and component_review.component:
             component_name = component_review.component.name
             if component_name in components_dict:
                 component_review_dict = components_dict[ component_name ]
                 component_review_dict[ 'component_review' ] = component_review
                 components_dict[ component_name ] = component_review_dict
     # Handle a Save button click.
     save_button_clicked = False
     save_buttons = [ '%s%sreview_button' % ( comp_name, STRSEP ) for comp_name in components_dict.keys() ]
     save_buttons.append( 'revision_approved_button' )
     for save_button in save_buttons:
         if save_button in kwd:
             save_button_clicked = True
             break
     if save_button_clicked:
         # Handle the revision_approved_select_field value.
         revision_approved = kwd.get( 'revision_approved', None )
         revision_approved_setting_changed = False
         if revision_approved:
             revision_approved = str( revision_approved )
             if review.approved != revision_approved:
                 revision_approved_setting_changed = True
                 review.approved = revision_approved
                 trans.sa_session.add( review )
                 trans.sa_session.flush()
         saved_component_names = []
         for component_name in components_dict.keys():
             flushed = False
             # Retrieve the review information from the form.
             # The star rating form field is a radio button list, so it will not be received if it was not clicked in the form.
             # Due to this behavior, default the value to 0.
             rating = 0
             # NOTE(review): component_id, comment, private and approved are
             # bound only when matching form fields are present; a missing
             # field leaves the previous iteration's value in place (or an
             # unbound name on the first pass) -- confirm the form always
             # posts every field for every component.
             for k, v in kwd.items():
                 if k.startswith( '%s%s' % ( component_name, STRSEP ) ):
                     component_review_attr = k.replace( '%s%s' % ( component_name, STRSEP ), '' )
                     if component_review_attr == 'component_id':
                         component_id = str( v )
                     elif component_review_attr == 'comment':
                         comment = str( v )
                     elif component_review_attr == 'private':
                         private = CheckboxField.is_checked( v )
                     elif component_review_attr == 'approved':
                         approved = str( v )
                     elif component_review_attr == 'rating':
                         rating = int( str( v ) )
             component = review_util.get_component( trans.app, component_id )
             component_review = \
                 review_util.get_component_review_by_repository_review_id_component_id( trans.app,
                                                                                        review_id,
                                                                                        component_id )
             if component_review:
                 # See if the existing component review should be updated.
                 if component_review.comment != comment or \
                         component_review.private != private or \
                         component_review.approved != approved or \
                         component_review.rating != rating:
                     component_review.comment = comment
                     component_review.private = private
                     component_review.approved = approved
                     component_review.rating = rating
                     trans.sa_session.add( component_review )
                     trans.sa_session.flush()
                     flushed = True
                     saved_component_names.append( component_name )
             else:
                 # See if a new component_review should be created.
                 if comment or private or approved != trans.model.ComponentReview.approved_states.NO or rating:
                     # Pass the parsed private flag through to the new record
                     # so a review marked private is created private, matching
                     # the update path above (it was previously dropped here).
                     component_review = trans.model.ComponentReview( repository_review_id=review.id,
                                                                     component_id=component.id,
                                                                     comment=comment,
                                                                     private=private,
                                                                     approved=approved,
                                                                     rating=rating )
                     trans.sa_session.add( component_review )
                     trans.sa_session.flush()
                     flushed = True
                     saved_component_names.append( component_name )
             if flushed:
                 # Update the repository rating value to be the average of all component review ratings.
                 average_rating = trans.sa_session.query( func.avg( trans.model.ComponentReview.table.c.rating ) ) \
                                                  .filter( and_( trans.model.ComponentReview.table.c.repository_review_id == review.id,
                                                                 trans.model.ComponentReview.table.c.deleted == false(),
                                                                 trans.model.ComponentReview.table.c.approved != trans.model.ComponentReview.approved_states.NA ) ) \
                                                  .scalar()
                 if average_rating is not None:
                     review.rating = int( average_rating )
                 trans.sa_session.add( review )
                 trans.sa_session.flush()
                 # Update the information in components_dict.
                 if component_name in components_dict:
                     component_review_dict = components_dict[ component_name ]
                     component_review_dict[ 'component_review' ] = component_review
                     components_dict[ component_name ] = component_review_dict
         if revision_approved_setting_changed:
             message += 'Approved value <b>%s</b> saved for this revision.<br/>' % review.approved
         if saved_component_names:
             message += 'Reviews were saved for components: %s' % ', '.join( saved_component_names )
         if not revision_approved_setting_changed and not saved_component_names:
             message += 'No changes were made to this review, so nothing was saved.'
     if review and review.approved:
         selected_value = review.approved
     else:
         selected_value = trans.model.ComponentReview.approved_states.NO
     revision_approved_select_field = grids_util.build_approved_select_field( trans,
                                                                              name='revision_approved',
                                                                              selected_value=selected_value,
                                                                              for_component=False )
     rev, changeset_revision_label = hg_util.get_rev_label_from_changeset_revision( repo, review.changeset_revision )
     return trans.fill_template( '/webapps/tool_shed/repository_review/edit_review.mako',
                                 repository=repository,
                                 review=review,
                                 changeset_revision_label=changeset_revision_label,
                                 revision_approved_select_field=revision_approved_select_field,
                                 components_dict=components_dict,
                                 message=message,
                                 status=status )
Ejemplo n.º 44
0
 def revision(self, app):
     """Return the repository tip as a 'revision_number:changeset_hash' string."""
     repo = hg_util.get_repo_for_repository(app, repository=self)
     # Resolve the tip context once instead of re-resolving it for the hash
     # portion of the label (the original called changectx twice).
     tip_ctx = repo.changectx(repo.changelog.tip())
     return "%s:%s" % (str(tip_ctx.rev()), str(tip_ctx))
Ejemplo n.º 45
0
def check_and_update_repository_metadata(app, info_only=False, verbosity=1):
    """
    This method will iterate through all records in the repository_metadata
    table, checking each one for tool metadata, then checking the tool
    metadata for tests.  Each tool's metadata should look something like:
    {
      "add_to_tool_panel": true,
      "description": "",
      "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3",
      "id": "tool_wrapper",
      "name": "Map with Tool Wrapper",
      "requirements": [],
      "tests": [],
      "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml",
      "tool_type": "default",
      "version": "1.2.3",
      "version_string_cmd": null
    }
    If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository)
    not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install
    and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
    not to be tested.

    :param app: the Tool Shed application instance (provides sa_session, model, config)
    :param info_only: when True, report findings but make no database changes
    :param verbosity: when >= 1, print per-tool and per-problem detail
    """
    start = time.time()
    skip_metadata_ids = []
    checked_repository_ids = []
    # Counters for the summary report printed at the end of the run.
    tool_count = 0
    has_tests = 0
    no_tests = 0
    valid_revisions = 0
    invalid_revisions = 0
    records_checked = 0
    # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
    print '# -------------------------------------------------------------------------------------------'
    print '# The skip_tool_test setting has been set for the following repository revision, so they will not be tested.'
    # NOTE(review): skip_metadata_ids was already initialized to [] above;
    # this second assignment is redundant but harmless.
    skip_metadata_ids = []
    for skip_tool_test in app.sa_session.query(app.model.SkipToolTest):
        print '# repository_metadata_id: %s, changeset_revision: %s' % \
            ( str( skip_tool_test.repository_metadata_id ), str( skip_tool_test.initial_changeset_revision ) )
        print 'reason: %s' % str(skip_tool_test.comment)
        skip_metadata_ids.append(skip_tool_test.repository_metadata_id)
    # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
    # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
    # because it's redundant to test a revision that a user can't install.
    for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \
                                             .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
                                                            app.model.RepositoryMetadata.table.c.includes_tools == True,
                                                            app.model.RepositoryMetadata.table.c.do_not_test == False ) ):
        # Initialize some items.
        missing_test_components = []
        revision_has_test_data = False
        testable_revision = False
        repository = repository_metadata.repository
        records_checked += 1
        # Check the next repository revision.
        changeset_revision = str(repository_metadata.changeset_revision)
        name = repository.name
        owner = repository.user.username
        metadata = repository_metadata.metadata
        # NOTE(review): repository was already assigned this same value a few
        # lines above; this re-assignment is redundant but harmless.
        repository = repository_metadata.repository
        if repository.id not in checked_repository_ids:
            checked_repository_ids.append(repository.id)
        print '# -------------------------------------------------------------------------------------------'
        print '# Checking revision %s of %s owned by %s.' % (
            changeset_revision, name, owner)
        if repository_metadata.id in skip_metadata_ids:
            print '# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.' % (
                changeset_revision, name, owner)
            continue
        # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
        # only repositories that contain tools.
        tool_dicts = metadata.get('tools', None)
        if tool_dicts is not None:
            # Clone the repository up to the changeset revision we're checking.
            repo_dir = repository.repo_path(app)
            hg_util.get_repo_for_repository(app,
                                            repository=None,
                                            repo_path=repo_dir,
                                            create=False)
            work_dir = tempfile.mkdtemp(prefix="tmp-toolshed-cafr")
            cloned_ok, error_message = hg_util.clone_repository(
                repo_dir, work_dir, changeset_revision)
            if cloned_ok:
                # Iterate through all the directories in the cloned changeset revision and determine whether there's a
                # directory named test-data. If this directory is not present update the metadata record for the changeset
                # revision we're checking.
                for root, dirs, files in os.walk(work_dir):
                    if '.hg' in dirs:
                        dirs.remove('.hg')
                    if 'test-data' in dirs:
                        revision_has_test_data = True
                        test_data_path = os.path.join(
                            root, dirs[dirs.index('test-data')])
                        break
            if revision_has_test_data:
                print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
            else:
                print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
            print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
                ( changeset_revision, name, owner )
            # Inspect each tool_dict for defined functional tests.  If there
            # are no tests, this tool should not be tested, since the tool
            # functional tests only report failure if the test itself fails,
            # not if it's missing or undefined. Filtering out those
            # repositories at this step will reduce the number of "false
            # negatives" the automated functional test framework produces.
            for tool_dict in tool_dicts:
                failure_reason = ''
                problem_found = False
                tool_has_defined_tests = False
                tool_has_test_files = False
                missing_test_files = []
                tool_count += 1
                tool_id = tool_dict['id']
                tool_version = tool_dict['version']
                tool_guid = tool_dict['guid']
                if verbosity >= 1:
                    print "# Checking tool ID '%s' in changeset revision %s of %s." % (
                        tool_id, changeset_revision, name)
                defined_test_dicts = tool_dict.get('tests', None)
                if defined_test_dicts is not None:
                    # We need to inspect the <test> tags because the following tags...
                    # <tests>
                    # </tests>
                    # ...will produce the following metadata:
                    # "tests": []
                    # And the following tags...
                    # <tests>
                    #     <test>
                    #    </test>
                    # </tests>
                    # ...will produce the following metadata:
                    # "tests":
                    #    [{"inputs": [], "name": "Test-1", "outputs": [], "required_files": []}]
                    for defined_test_dict in defined_test_dicts:
                        inputs = defined_test_dict.get('inputs', [])
                        outputs = defined_test_dict.get('outputs', [])
                        if inputs and outputs:
                            # At least one tool within the repository has a valid <test> tag.
                            tool_has_defined_tests = True
                            break
                if tool_has_defined_tests:
                    print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
                        ( tool_id, changeset_revision, name )
                    has_tests += 1
                else:
                    print '# No functional tests defined for %s.' % tool_id
                    no_tests += 1
                if tool_has_defined_tests and revision_has_test_data:
                    # test_data_path is only bound when revision_has_test_data
                    # is True, so this guard keeps it from being referenced unbound.
                    missing_test_files = check_for_missing_test_files(
                        defined_test_dicts, test_data_path)
                    if missing_test_files:
                        print "# Tool id '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
                            ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
                    else:
                        tool_has_test_files = True
                if not revision_has_test_data:
                    failure_reason += 'Repository does not have a test-data directory. '
                    problem_found = True
                if not tool_has_defined_tests:
                    failure_reason += 'Functional test definitions missing for %s. ' % tool_id
                    problem_found = True
                if missing_test_files:
                    failure_reason += 'One or more test files are missing for tool %s: %s' % (
                        tool_id, ', '.join(missing_test_files))
                    problem_found = True
                test_errors = dict(tool_id=tool_id,
                                   tool_version=tool_version,
                                   tool_guid=tool_guid,
                                   missing_components=failure_reason)
                # Only append this error dict if it hasn't already been added.
                if problem_found:
                    if test_errors not in missing_test_components:
                        missing_test_components.append(test_errors)
                if tool_has_defined_tests and tool_has_test_files:
                    print '# Revision %s of %s owned by %s is a testable revision.' % (
                        changeset_revision, name, owner)
                    testable_revision = True
            # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
            if os.path.exists(work_dir):
                shutil.rmtree(work_dir)
            if not missing_test_components:
                valid_revisions += 1
                print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
            else:
                invalid_revisions += 1
                print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
                if verbosity >= 1:
                    for missing_test_component in missing_test_components:
                        if 'missing_components' in missing_test_component:
                            print '# %s' % missing_test_component[
                                'missing_components']
            if not info_only:
                # Get or create the list of tool_test_results dictionaries.
                if repository_metadata.tool_test_results is not None:
                    # We'll listify the column value in case it uses the old approach of storing the results of only a single test run.
                    tool_test_results_dicts = listify(
                        repository_metadata.tool_test_results)
                else:
                    tool_test_results_dicts = []
                if tool_test_results_dicts:
                    # Inspect the tool_test_results_dict for the last test run in case it contains only a test_environment
                    # entry.  This will occur with multiple runs of this script without running the associated
                    # install_and_test_tool_sed_repositories.sh script which will further populate the tool_test_results_dict.
                    tool_test_results_dict = tool_test_results_dicts[0]
                    if len(tool_test_results_dict) <= 1:
                        # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
                        # a test_environment entry.  If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
                        # since it will be re-inserted later.
                        tool_test_results_dict = tool_test_results_dicts.pop(0)
                    elif (len(tool_test_results_dict) == 2
                          and 'test_environment' in tool_test_results_dict and
                          'missing_test_components' in tool_test_results_dict):
                        # We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components".
                        # In this case, some tools are missing tests components while others are not.
                        tool_test_results_dict = tool_test_results_dicts.pop(0)
                    else:
                        # The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
                        tool_test_results_dict = {}
                else:
                    # Create a new dictionary for the most recent test run.
                    tool_test_results_dict = {}
                test_environment_dict = tool_test_results_dict.get(
                    'test_environment', {})
                # Add the current time as the approximate time that this test run occurs.  A similar value will also be
                # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
                # may be configured to store multiple test run results, so each must be associated with a time stamp.
                now = time.strftime("%Y-%m-%d %H:%M:%S")
                test_environment_dict['time_tested'] = now
                test_environment_dict[
                    'tool_shed_database_version'] = get_database_version(app)
                test_environment_dict[
                    'tool_shed_mercurial_version'] = __version__.version
                test_environment_dict[
                    'tool_shed_revision'] = get_repository_current_revision(
                        os.getcwd())
                tool_test_results_dict[
                    'test_environment'] = test_environment_dict
                # The repository_metadata.time_last_tested column is not changed by this script since no testing is performed here.
                if missing_test_components:
                    # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
                    # found in this revision, and:
                    # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
                    #    In this case, the revision will never be updated with the missing components, and re-testing it would be redundant.
                    # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable
                    #    revision. In this case, if the repository is updated with test data or functional tests, the downloadable
                    #    changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable
                    #    changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
                    #    In the install and test script, this behavior is slightly different, since we do want to always run functional
                    #    tests on the most recent downloadable changeset revision.
                    if should_set_do_not_test_flag(app, repository,
                                                   changeset_revision,
                                                   testable_revision):
                        print "# Setting do_not_test to True on revision %s of %s owned by %s because it is missing test components" % (
                            changeset_revision, name, owner)
                        print "# and it is not the latest downloadable revision."
                        repository_metadata.do_not_test = True
                    if not testable_revision:
                        # Even though some tools may be missing test components, it may be possible to test other tools.  Since the
                        # install and test framework filters out repositories marked as missing test components, we'll set it only if
                        # no tools can be tested.
                        print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % (
                            changeset_revision, name, owner)
                        repository_metadata.missing_test_components = True
                        print "# Setting tools_functionally_correct to False on revision %s of %s owned by %s because it is missing test components" % (
                            changeset_revision, name, owner)
                        repository_metadata.tools_functionally_correct = False
                    tool_test_results_dict[
                        'missing_test_components'] = missing_test_components
                # Store only the configured number of test runs.
                num_tool_test_results_saved = int(
                    app.config.num_tool_test_results_saved)
                if len(tool_test_results_dicts) >= num_tool_test_results_saved:
                    test_results_index = num_tool_test_results_saved - 1
                    new_tool_test_results_dicts = tool_test_results_dicts[:
                                                                          test_results_index]
                else:
                    new_tool_test_results_dicts = [
                        d for d in tool_test_results_dicts
                    ]
                # Insert the new element into the first position in the list.
                new_tool_test_results_dicts.insert(0, tool_test_results_dict)
                repository_metadata.tool_test_results = new_tool_test_results_dicts
                app.sa_session.add(repository_metadata)
                app.sa_session.flush()
    # Print the summary report for the whole run.
    stop = time.time()
    print '# -------------------------------------------------------------------------------------------'
    print '# Checked %d repositories with %d tools in %d changeset revisions.' % (
        len(checked_repository_ids), tool_count, records_checked)
    print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
    print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
    print '# Found %d tools without functional tests.' % no_tests
    print '# Found %d tools with functional tests.' % has_tests
    if info_only:
        print '# Database not updated, info_only set.'
    print "# Elapsed time: ", stop - start
    print "#############################################################################"
Ejemplo n.º 46
0
 def tip(self, app):
     """Return the short changeset hash of this repository's tip revision."""
     hg_repo = hg_util.get_repo_for_repository(app, repository=self)
     tip_node = hg_repo.changelog.tip()
     tip_context = hg_repo.changectx(tip_node)
     return str(tip_context)
Ejemplo n.º 47
0
def create_repository(app,
                      name,
                      type,
                      description,
                      long_description,
                      user_id,
                      category_ids=None,
                      remote_repository_url=None,
                      homepage_url=None):
    """Create a new ToolShed repository.

    Creates the database record, an admin role, the on-disk mercurial
    repository (with its .hg/hgrc file and hgweb.config entry), optional
    category associations, and registers the repository in the repository
    registry.

    :param app: the Tool Shed application instance
    :param name: repository name
    :param type: repository type ('type' shadows the builtin but is kept
        because callers may pass it by keyword)
    :param description: short description
    :param long_description: long description
    :param user_id: id of the owning user
    :param category_ids: optional list of encoded category ids to associate
    :param remote_repository_url: optional remote development repository URL
    :param homepage_url: optional homepage URL
    :returns: tuple of (new Repository object, HTML status message)
    """
    # Avoid the shared-mutable-default pitfall: default to a fresh list here
    # rather than in the signature.  Passing an explicit list behaves as before.
    if category_ids is None:
        category_ids = []
    sa_session = app.model.context.current
    # Add the repository record to the database.
    repository = app.model.Repository(
        name=name,
        type=type,
        remote_repository_url=remote_repository_url,
        homepage_url=homepage_url,
        description=description,
        long_description=long_description,
        user_id=user_id)
    # Flush to get the id.
    sa_session.add(repository)
    sa_session.flush()
    # Create an admin role for the repository.
    create_repository_admin_role(app, repository)
    # Determine the repository's repo_path on disk (renamed from 'dir' to
    # avoid shadowing the builtin).
    repository_dir = os.path.join(app.config.file_path,
                                  *directory_hash_id(repository.id))
    # Create directory if it does not exist.
    if not os.path.exists(repository_dir):
        os.makedirs(repository_dir)
    # Define repo name inside hashed directory.
    repository_path = os.path.join(repository_dir, "repo_%d" % repository.id)
    # Create local repository directory.
    if not os.path.exists(repository_path):
        os.makedirs(repository_path)
    # Create the local repository.
    hg_util.get_repo_for_repository(app,
                                    repository=None,
                                    repo_path=repository_path,
                                    create=True)
    # Add an entry in the hgweb.config file for the local repository.
    lhs = "repos/%s/%s" % (repository.user.username, repository.name)
    app.hgweb_config_manager.add_entry(lhs, repository_path)
    # Create a .hg/hgrc file for the local repository.
    hg_util.create_hgrc_file(app, repository)
    flush_needed = False
    if category_ids:
        # Create category associations.
        for category_id in category_ids:
            category = sa_session.query( app.model.Category ) \
                                 .get( app.security.decode_id( category_id ) )
            rca = app.model.RepositoryCategoryAssociation(repository, category)
            sa_session.add(rca)
            flush_needed = True
    if flush_needed:
        sa_session.flush()
    # Update the repository registry.
    app.repository_registry.add_entry(repository)
    message = "Repository <b>%s</b> has been created." % escape(
        str(repository.name))
    return repository, message
Ejemplo n.º 48
0
    def get_repository_revision_install_info(self, trans, name, owner,
                                             changeset_revision, **kwd):
        """
        GET /api/repositories/get_repository_revision_install_info

        :param name: the name of the Repository
        :param owner: the owner of the Repository
        :param changeset_revision: the changeset_revision of the RepositoryMetadata object associated with the Repository

        Returns a list of the following dictionaries::
        - a dictionary defining the Repository.  For example:
        {
            "deleted": false,
            "deprecated": false,
            "description": "add_column hello",
            "id": "f9cad7b01a472135",
            "long_description": "add_column hello",
            "name": "add_column",
            "owner": "test",
            "private": false,
            "times_downloaded": 6,
            "url": "/api/repositories/f9cad7b01a472135",
            "user_id": "f9cad7b01a472135"
        }
        - a dictionary defining the Repository revision (RepositoryMetadata).  For example:
        {
            "changeset_revision": "3a08cc21466f",
            "downloadable": true,
            "has_repository_dependencies": false,
            "has_repository_dependencies_only_if_compiling_contained_td": false,
            "id": "f9cad7b01a472135",
            "includes_datatypes": false,
            "includes_tool_dependencies": false,
            "includes_tools": true,
            "includes_tools_for_display_in_tool_panel": true,
            "includes_workflows": false,
            "malicious": false,
            "repository_id": "f9cad7b01a472135",
            "url": "/api/repository_revisions/f9cad7b01a472135"
        }
        - a dictionary including the additional information required to install the repository.  For example:
        {
            "add_column": [
                "add_column hello",
                "http://test@localhost:9009/repos/test/add_column",
                "3a08cc21466f",
                "1",
                "test",
                {},
                {}
            ]
        }

        On any failure, a tuple of three dictionaries is still returned so
        callers can unpack it unconditionally; missing pieces are empty dicts.
        """
        # Example URL:
        # http://<xyz>/api/repositories/get_repository_revision_install_info?name=<n>&owner=<o>&changeset_revision=<cr>
        if name and owner and changeset_revision:
            # Get the repository information.
            repository = suc.get_repository_by_name_and_owner(
                trans.app, name, owner)
            if repository is None:
                log.debug('Cannot locate repository %s owned by %s' %
                          (str(name), str(owner)))
                return {}, {}, {}
            encoded_repository_id = trans.security.encode_id(repository.id)
            repository_dict = repository.to_dict(
                view='element', value_mapper=self.__get_value_mapper(trans))
            repository_dict['url'] = web.url_for(controller='repositories',
                                                 action='show',
                                                 id=encoded_repository_id)
            # Get the repository_metadata information.
            repository_metadata = suc.get_repository_metadata_by_changeset_revision(
                trans.app, encoded_repository_id, changeset_revision)
            if repository_metadata is None:
                # The changeset_revision column in the repository_metadata table has been updated with a new
                # value, so find the changeset_revision to which we need to update.
                repo = hg_util.get_repo_for_repository(trans.app,
                                                       repository=repository,
                                                       repo_path=None,
                                                       create=False)
                new_changeset_revision = suc.get_next_downloadable_changeset_revision(
                    repository, repo, changeset_revision)
                # Retry the metadata lookup with the updated changeset revision.
                repository_metadata = suc.get_repository_metadata_by_changeset_revision(
                    trans.app, encoded_repository_id, new_changeset_revision)
                changeset_revision = new_changeset_revision
            if repository_metadata is not None:
                encoded_repository_metadata_id = trans.security.encode_id(
                    repository_metadata.id)
                repository_metadata_dict = repository_metadata.to_dict(
                    view='collection',
                    value_mapper=self.__get_value_mapper(trans))
                repository_metadata_dict['url'] = web.url_for(
                    controller='repository_revisions',
                    action='show',
                    id=encoded_repository_metadata_id)
                # Get the repo_info_dict for installing the repository.  Only the
                # repo_info_dict from this 6-tuple is returned to the caller.
                repo_info_dict, \
                includes_tools, \
                includes_tool_dependencies, \
                includes_tools_for_display_in_tool_panel, \
                has_repository_dependencies, \
                has_repository_dependencies_only_if_compiling_contained_td = \
                    repository_util.get_repo_info_dict( trans.app,
                                                        trans.user,
                                                        encoded_repository_id,
                                                        changeset_revision )
                return repository_dict, repository_metadata_dict, repo_info_dict
            else:
                log.debug( "Unable to locate repository_metadata record for repository id %s and changeset_revision %s" % \
                    ( str( repository.id ), str( changeset_revision ) ) )
                return repository_dict, {}, {}
        else:
            # One or more required parameters is missing or empty.
            debug_msg = "Error in the Tool Shed repositories API in get_repository_revision_install_info: "
            debug_msg += "Invalid name %s or owner %s or changeset_revision %s received." % \
                ( str( name ), str( owner ), str( changeset_revision ) )
            log.debug(debug_msg)
            return {}, {}, {}
Ejemplo n.º 49
0
def build_readme_files_dict(app,
                            repository,
                            changeset_revision,
                            metadata,
                            tool_path=None):
    """
    Return a dictionary of valid readme file name <-> readme file content pairs for all readme files defined in the received metadata.  Since the
    received changeset_revision (which is associated with the received metadata) may not be the latest installable changeset revision, the README
    file contents may not be available on disk.  This method is used by both Galaxy and the Tool Shed.

    :param app: the Galaxy or Tool Shed application object (branching on ``app.name``)
    :param repository: the repository whose README files are being collected
    :param changeset_revision: the changeset revision associated with ``metadata``
    :param metadata: repository metadata dict which may contain a 'readme_files' list
    :param tool_path: optional path prefix under which the repository files live on disk
    :returns: dict mapping readme file name -> HTML-rendered readme content
    """
    if app.name == 'galaxy':
        can_use_disk_files = True
    else:
        # In the Tool Shed, files on disk reflect the repository tip, so disk contents
        # can only be trusted when the requested revision is the latest downloadable one.
        repo = hg_util.get_repo_for_repository(app,
                                               repository=repository,
                                               repo_path=None,
                                               create=False)
        latest_downloadable_changeset_revision = suc.get_latest_downloadable_changeset_revision(
            app, repository, repo)
        can_use_disk_files = changeset_revision == latest_downloadable_changeset_revision
    readme_files_dict = {}
    if metadata and 'readme_files' in metadata:
        for relative_path_to_readme_file in metadata['readme_files']:
            readme_file_name = os.path.split(relative_path_to_readme_file)[1]
            if can_use_disk_files:
                if tool_path:
                    full_path_to_readme_file = os.path.abspath(
                        os.path.join(tool_path, relative_path_to_readme_file))
                else:
                    full_path_to_readme_file = os.path.abspath(
                        relative_path_to_readme_file)
                text = None
                try:
                    # Context manager guarantees the file is closed even if read/decoding fails
                    # (the previous explicit open/close leaked the handle on error).
                    with open(full_path_to_readme_file, 'r') as f:
                        text = unicodify(f.read())
                except Exception as e:
                    log.exception(
                        "Error reading README file '%s' from disk: %s" %
                        (str(relative_path_to_readme_file), str(e)))
                    text = None
                if text:
                    text_of_reasonable_length = basic_util.size_string(text)
                    if text_of_reasonable_length.find('.. image:: ') >= 0:
                        # Handle image display for README files that are contained in repositories in the tool shed or installed into Galaxy.
                        lock = threading.Lock()
                        lock.acquire(True)
                        try:
                            text_of_reasonable_length = suc.set_image_paths(
                                app, app.security.encode_id(repository.id),
                                text_of_reasonable_length)
                        except Exception as e:
                            log.exception(
                                "Exception in build_readme_files_dict, so images may not be properly displayed:\n%s"
                                % str(e))
                        finally:
                            lock.release()
                    if readme_file_name.endswith('.rst'):
                        # Render reStructuredText through a Mako template so static path
                        # and host URL placeholders are expanded.
                        text_of_reasonable_length = Template(
                            rst_to_html(text_of_reasonable_length),
                            input_encoding='utf-8',
                            output_encoding='utf-8',
                            default_filters=['decode.utf8'],
                            encoding_errors='replace')
                        text_of_reasonable_length = text_of_reasonable_length.render(
                            static_path=web.url_for('/static'),
                            host_url=web.url_for('/', qualified=True))
                        text_of_reasonable_length = unicodify(
                            text_of_reasonable_length)
                    else:
                        text_of_reasonable_length = basic_util.to_html_string(
                            text_of_reasonable_length)
                    readme_files_dict[readme_file_name] = text_of_reasonable_length
    # The docstring promises a dict; the previous version fell off the end and returned None.
    return readme_files_dict
Ejemplo n.º 50
0
 def upload(self, trans, **kwd):
     """
     Handle an upload to a Tool Shed repository.

     Accepts one of: a posted file (single file or tar archive, optionally
     gzip/bz2 compressed), a plain http(s) URL to fetch, or an hg URL to
     clone.  Valid content is committed to the repository's mercurial repo,
     email alerts are sent to subscribed users, repository metadata is reset,
     and on change the browser is redirected to browse the repository;
     otherwise the upload form is re-rendered with a message.

     :param trans: framework transaction object for the current request
     :param kwd: request parameters (file_data, url, commit_message,
                 uncompress_file, remove_repo_files_not_in_tar, ...)
     """
     message = escape(kwd.get('message', ''))
     status = kwd.get('status', 'done')
     commit_message = escape(kwd.get('commit_message', 'Uploaded'))
     repository_id = kwd.get('repository_id', '')
     repository = repository_util.get_repository_in_tool_shed(trans.app, repository_id)
     repo_dir = repository.repo_path(trans.app)
     repo = hg_util.get_repo_for_repository(trans.app, repository=None, repo_path=repo_dir, create=False)
     uncompress_file = util.string_as_bool(kwd.get('uncompress_file', 'true'))
     remove_repo_files_not_in_tar = util.string_as_bool(kwd.get('remove_repo_files_not_in_tar', 'true'))
     uploaded_file = None
     upload_point = commit_util.get_upload_point(repository, **kwd)
     # Remember the pre-upload tip so we can detect later whether anything changed.
     tip = repository.tip(trans.app)
     file_data = kwd.get('file_data', '')
     url = kwd.get('url', '')
     # Part of the upload process is sending email notification to those that have registered to
     # receive them.  One scenario occurs when the first change set is produced for the repository.
     # See the suc.handle_email_alerts() method for the definition of the scenarios.
     new_repo_alert = repository.is_new(trans.app)
     uploaded_directory = None
     if kwd.get('upload_button', False):
         if file_data == '' and url == '':
             message = 'No files were entered on the upload form.'
             status = 'error'
             uploaded_file = None
         elif url and url.startswith('hg'):
             # Use mercurial clone to fetch repository, contents will then be copied over.
             uploaded_directory = tempfile.mkdtemp()
             repo_url = 'http%s' % url[len('hg'):]
             repo_url = repo_url.encode('ascii', 'replace')
             try:
                 commands.clone(hg_util.get_configured_ui(), repo_url, uploaded_directory)
             except Exception as e:
                 message = 'Error uploading via mercurial clone: %s' % basic_util.to_html_string(str(e))
                 status = 'error'
                 basic_util.remove_dir(uploaded_directory)
                 uploaded_directory = None
         elif url:
             # Stream the file from a plain http(s) URL into a temp file in chunks.
             valid_url = True
             try:
                 stream = requests.get(url, stream=True)
             except Exception as e:
                 valid_url = False
                 message = 'Error uploading file via http: %s' % str(e)
                 status = 'error'
                 uploaded_file = None
             if valid_url:
                 fd, uploaded_file_name = tempfile.mkstemp()
                 uploaded_file = open(uploaded_file_name, 'wb')
                 for chunk in stream.iter_content(chunk_size=util.CHUNK_SIZE):
                     if chunk:
                         uploaded_file.write(chunk)
                 uploaded_file.flush()
                 uploaded_file_filename = url.split('/')[-1]
                 isempty = os.path.getsize(os.path.abspath(uploaded_file_name)) == 0
         elif file_data not in ('', None):
             # A file was posted through the upload form.
             uploaded_file = file_data.file
             uploaded_file_name = uploaded_file.name
             uploaded_file_filename = os.path.split(file_data.filename)[-1]
             isempty = os.path.getsize(os.path.abspath(uploaded_file_name)) == 0
         if uploaded_file or uploaded_directory:
             rdah = attribute_handlers.RepositoryDependencyAttributeHandler(trans.app, unpopulate=False)
             tdah = attribute_handlers.ToolDependencyAttributeHandler(trans.app, unpopulate=False)
             tdtm = data_table_manager.ToolDataTableManager(trans.app)
             ok = True
             isgzip = False
             isbz2 = False
             if uploaded_file:
                 if uncompress_file:
                     isgzip = checkers.is_gzip(uploaded_file_name)
                     if not isgzip:
                         isbz2 = checkers.is_bz2(uploaded_file_name)
                 if isempty:
                     tar = None
                     istar = False
                 else:
                     # Determine what we have - a single file or an archive
                     try:
                         if (isgzip or isbz2) and uncompress_file:
                             # Open for reading with transparent compression.
                             tar = tarfile.open(uploaded_file_name, 'r:*')
                         else:
                             tar = tarfile.open(uploaded_file_name)
                         istar = True
                     except tarfile.ReadError as e:
                         # Not a tar archive - fall through to single-file handling below.
                         tar = None
                         istar = False
             else:
                 # Uploaded directory
                 istar = False
             if istar:
                 # Extract the archive into the repository relative to the upload point.
                 ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \
                     repository_content_util.upload_tar(
                         trans,
                         rdah,
                         tdah,
                         repository,
                         tar,
                         uploaded_file,
                         upload_point,
                         remove_repo_files_not_in_tar,
                         commit_message,
                         new_repo_alert
                     )
             elif uploaded_directory:
                 # Copy the files over from the cloned mercurial repository.
                 ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \
                     self.upload_directory(trans,
                                           rdah,
                                           tdah,
                                           repository,
                                           uploaded_directory,
                                           upload_point,
                                           remove_repo_files_not_in_tar,
                                           commit_message,
                                           new_repo_alert)
             else:
                 # Single (non-archive) file upload.
                 if (isgzip or isbz2) and uncompress_file:
                     uploaded_file_filename = commit_util.uncompress(repository,
                                                                     uploaded_file_name,
                                                                     uploaded_file_filename,
                                                                     isgzip=isgzip,
                                                                     isbz2=isbz2)
                 # Specialized repository types may only contain their single definition file.
                 if repository.type == rt_util.REPOSITORY_SUITE_DEFINITION and \
                         uploaded_file_filename != rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
                     ok = False
                     message = 'Repositories of type <b>Repository suite definition</b> can only contain a single file named '
                     message += '<b>repository_dependencies.xml</b>.'
                 elif repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION and \
                         uploaded_file_filename != rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME:
                     ok = False
                     message = 'Repositories of type <b>Tool dependency definition</b> can only contain a single file named '
                     message += '<b>tool_dependencies.xml</b>.'
                 if ok:
                     if upload_point is not None:
                         full_path = os.path.abspath(os.path.join(repo_dir, upload_point, uploaded_file_filename))
                     else:
                         full_path = os.path.abspath(os.path.join(repo_dir, uploaded_file_filename))
                     # Move some version of the uploaded file to the load_point within the repository hierarchy.
                     if uploaded_file_filename in [rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME]:
                         # Inspect the contents of the file to see if toolshed or changeset_revision attributes
                         # are missing and if so, set them appropriately.
                         altered, root_elem, error_message = rdah.handle_tag_attributes(uploaded_file_name)
                         if error_message:
                             ok = False
                             message = error_message
                             status = 'error'
                         elif altered:
                             tmp_filename = xml_util.create_and_write_tmp_file(root_elem)
                             shutil.move(tmp_filename, full_path)
                         else:
                             shutil.move(uploaded_file_name, full_path)
                     elif uploaded_file_filename in [rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME]:
                         # Inspect the contents of the file to see if changeset_revision values are
                         # missing and if so, set them appropriately.
                         altered, root_elem, error_message = tdah.handle_tag_attributes(uploaded_file_name)
                         if error_message:
                             ok = False
                             message = error_message
                             status = 'error'
                         if ok:
                             if altered:
                                 tmp_filename = xml_util.create_and_write_tmp_file(root_elem)
                                 shutil.move(tmp_filename, full_path)
                             else:
                                 shutil.move(uploaded_file_name, full_path)
                     else:
                         shutil.move(uploaded_file_name, full_path)
                     if ok:
                         # See if any admin users have chosen to receive email alerts when a repository is updated.
                         # If so, check every uploaded file to ensure content is appropriate.
                         check_contents = commit_util.check_file_contents_for_email_alerts(trans.app)
                         if check_contents and os.path.isfile(full_path):
                             content_alert_str = commit_util.check_file_content_for_html_and_images(full_path)
                         else:
                             content_alert_str = ''
                         hg_util.add_changeset(repo.ui, repo, full_path)
                         # Convert from unicode to prevent "TypeError: array item must be char"
                         full_path = full_path.encode('ascii', 'replace')
                         hg_util.commit_changeset(repo.ui,
                                                  repo,
                                                  full_path_to_changeset=full_path,
                                                  username=trans.user.username,
                                                  message=commit_message)
                         if full_path.endswith('tool_data_table_conf.xml.sample'):
                             # Handle the special case where a tool_data_table_conf.xml.sample file is being uploaded
                             # by parsing the file and adding new entries to the in-memory trans.app.tool_data_tables
                             # dictionary.
                             error, error_message = tdtm.handle_sample_tool_data_table_conf_file(full_path, persist=False)
                             if error:
                                 message = '%s<br/>%s' % (message, error_message)
                         # See if the content of the change set was valid.
                         admin_only = len(repository.downloadable_revisions) != 1
                         suc.handle_email_alerts(trans.app,
                                                 trans.request.host,
                                                 repository,
                                                 content_alert_str=content_alert_str,
                                                 new_repo_alert=new_repo_alert,
                                                 admin_only=admin_only)
             if ok:
                 # Update the repository files for browsing.
                 hg_util.update_repository(repo)
                 # Get the new repository tip.
                 if tip == repository.tip(trans.app):
                     message = 'No changes to repository.  '
                     status = 'warning'
                 else:
                     if (isgzip or isbz2) and uncompress_file:
                         uncompress_str = ' uncompressed and '
                     else:
                         uncompress_str = ' '
                     if uploaded_directory:
                         source_type = "repository"
                         source = url
                     else:
                         source_type = "file"
                         source = uploaded_file_filename
                     message = "The %s <b>%s</b> has been successfully%suploaded to the repository.  " % \
                         (source_type, escape(source), uncompress_str)
                     if istar and (undesirable_dirs_removed or undesirable_files_removed):
                         items_removed = undesirable_dirs_removed + undesirable_files_removed
                         message += "  %d undesirable items (.hg .svn .git directories, .DS_Store, hgrc files, etc) " % items_removed
                         message += "were removed from the archive.  "
                     if istar and remove_repo_files_not_in_tar and files_to_remove:
                         if upload_point is not None:
                             message += "  %d files were removed from the repository relative to the selected upload point '%s'.  " % \
                                 (len(files_to_remove), upload_point)
                         else:
                             message += "  %d files were removed from the repository root.  " % len(files_to_remove)
                     # Recompute repository metadata now that the tip has moved.
                     rmm = repository_metadata_manager.RepositoryMetadataManager(app=trans.app,
                                                                                 user=trans.user,
                                                                                 repository=repository)
                     status, error_message = \
                         rmm.set_repository_metadata_due_to_new_tip(trans.request.host,
                                                                    content_alert_str=content_alert_str,
                                                                    **kwd)
                     if error_message:
                         message = error_message
                     kwd['message'] = message
                 if repository.metadata_revisions:
                     # A repository's metadata revisions are order descending by update_time, so the zeroth revision
                     # will be the tip just after an upload.
                     metadata_dict = repository.metadata_revisions[0].metadata
                 else:
                     metadata_dict = {}
                 dd = dependency_display.DependencyDisplayer(trans.app)
                 if str(repository.type) not in [rt_util.REPOSITORY_SUITE_DEFINITION,
                                                 rt_util.TOOL_DEPENDENCY_DEFINITION]:
                     change_repository_type_message = rt_util.generate_message_for_repository_type_change(trans.app,
                                                                                                          repository)
                     if change_repository_type_message:
                         message += change_repository_type_message
                         status = 'warning'
                     else:
                         # Provide a warning message if a tool_dependencies.xml file is provided, but tool dependencies
                         # weren't loaded due to a requirement tag mismatch or some other problem.  Tool dependency
                         # definitions can define orphan tool dependencies (no relationship to any tools contained in the
                         # repository), so warning messages are important because orphans are always valid.  The repository
                         # owner must be warned in case they did not intend to define an orphan dependency, but simply
                         # provided incorrect information (tool shed, name owner, changeset_revision) for the definition.
                         orphan_message = dd.generate_message_for_orphan_tool_dependencies(repository, metadata_dict)
                         if orphan_message:
                             message += orphan_message
                             status = 'warning'
                 # Handle messaging for invalid tool dependencies.
                 invalid_tool_dependencies_message = dd.generate_message_for_invalid_tool_dependencies(metadata_dict)
                 if invalid_tool_dependencies_message:
                     message += invalid_tool_dependencies_message
                     status = 'error'
                 # Handle messaging for invalid repository dependencies.
                 invalid_repository_dependencies_message = \
                     dd.generate_message_for_invalid_repository_dependencies(metadata_dict,
                                                                             error_from_tuple=True)
                 if invalid_repository_dependencies_message:
                     message += invalid_repository_dependencies_message
                     status = 'error'
                 # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
                 tdtm.reset_tool_data_tables()
                 if uploaded_directory:
                     basic_util.remove_dir(uploaded_directory)
                 trans.response.send_redirect(web.url_for(controller='repository',
                                                          action='browse_repository',
                                                          id=repository_id,
                                                          commit_message='Deleted selected files',
                                                          message=message,
                                                          status=status))
             else:
                 if uploaded_directory:
                     basic_util.remove_dir(uploaded_directory)
                 status = 'error'
             # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
             tdtm.reset_tool_data_tables()
     return trans.fill_template('/webapps/tool_shed/repository/upload.mako',
                                repository=repository,
                                changeset_revision=tip,
                                url=url,
                                commit_message=commit_message,
                                uncompress_file=uncompress_file,
                                remove_repo_files_not_in_tar=remove_repo_files_not_in_tar,
                                message=message,
                                status=status)
Ejemplo n.º 51
0
 def handle_elem(self, elem):
     """
     Populate or unpopulate the changeset_revision and toolshed attributes of repository tags.

     Returns a 3-tuple ``(altered, elem, error_message)`` where ``altered`` indicates whether
     the element was modified and ``error_message`` is '' on success.
     """
     # <repository name="molecule_datatypes" owner="test" changeset_revision="1a070566e9c6" />
     # <repository changeset_revision="xxx" name="package_xorg_macros_1_17_1" owner="test" toolshed="yyy">
     #    <package name="xorg_macros" version="1.17.1" />
     # </repository>
     error_message = ''
     name = elem.get('name')
     owner = elem.get('owner')
     # The name and owner attributes are always required, so if either are missing, return the error message.
     if not name or not owner:
         error_message = self.check_tag_attributes(elem)
         return False, elem, error_message
     altered = False
     toolshed = elem.get('toolshed')
     changeset_revision = elem.get('changeset_revision')
     # Over a short period of time a bug existed which caused the prior_installation_required attribute
     # to be set to False and included in the <repository> tag when a repository was exported along with
     # its dependencies.  The following will eliminate this problematic attribute upon import.
     prior_installation_required = elem.get('prior_installation_required')
     if prior_installation_required is not None and not asbool(prior_installation_required):
         del elem.attrib['prior_installation_required']
     sub_elems = list(elem)
     if len(sub_elems) > 0:
         # At this point, a <repository> tag will point only to a package.
         # <package name="xorg_macros" version="1.17.1" />
         # Coerce the list to an odict().
         sub_elements = odict()
         packages = []
         for sub_elem in sub_elems:
             sub_elem_type = sub_elem.tag
             sub_elem_name = sub_elem.get('name')
             sub_elem_version = sub_elem.get('version')
             if sub_elem_type and sub_elem_name and sub_elem_version:
                 packages.append((sub_elem_name, sub_elem_version))
         sub_elements['packages'] = packages
     else:
         # Set to None.
         sub_elements = None
     if self.unpopulate:
         # We're exporting the repository, so eliminate all toolshed and changeset_revision attributes
         # from the <repository> tag.
         # Fix: new_elem was previously assigned only inside the conditional below, raising a
         # NameError at the return when neither attribute was present; default to the original element.
         new_elem = elem
         if toolshed or changeset_revision:
             attributes = odict()
             attributes['name'] = name
             attributes['owner'] = owner
             prior_installation_required = elem.get('prior_installation_required')
             if asbool(prior_installation_required):
                 attributes['prior_installation_required'] = 'True'
             new_elem = xml_util.create_element('repository', attributes=attributes, sub_elements=sub_elements)
             altered = True
         return altered, new_elem, error_message
     # From here on we're populating the toolshed and changeset_revision attributes if necessary.
     if not toolshed:
         # Default the setting to the current tool shed.
         toolshed = str(url_for('/', qualified=True)).rstrip('/')
         elem.attrib['toolshed'] = toolshed
         altered = True
     if not changeset_revision:
         # Populate the changeset_revision attribute with the latest installable metadata revision for
         # the defined repository.  We use the latest installable revision instead of the latest metadata
         # revision to ensure that the contents of the revision are valid.
         repository = repository_util.get_repository_by_name_and_owner(self.app, name, owner)
         if repository:
             repo = hg_util.get_repo_for_repository(self.app,
                                                    repository=repository,
                                                    repo_path=None,
                                                    create=False)
             latest_installable_changeset_revision = \
                 metadata_util.get_latest_downloadable_changeset_revision(self.app, repository, repo)
             if latest_installable_changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
                 elem.attrib['changeset_revision'] = latest_installable_changeset_revision
                 altered = True
             else:
                 error_message = 'Invalid latest installable changeset_revision %s ' % \
                     str(latest_installable_changeset_revision)
                 error_message += 'retrieved for repository %s owned by %s.  ' % (str(name), str(owner))
         else:
             error_message = 'Unable to locate repository with name %s and owner %s.  ' % (str(name), str(owner))
     return altered, elem, error_message
Ejemplo n.º 52
0
def handle_email_alerts(app,
                        host,
                        repository,
                        content_alert_str='',
                        new_repo_alert=False,
                        admin_only=False):
    """
    Send email alerts to users subscribed to updates for ``repository``.

    There are 2 complementary features that enable a tool shed user to receive email notification:
    1. Within User Preferences, they can elect to receive email when the first (or first valid)
       change set is produced for a new repository.
    2. When viewing or managing a repository, they can check the box labeled "Receive email alerts"
       which causes them to receive email alerts when updates to the repository occur.  This same feature
       is available on a per-repository basis on the repository grid within the tool shed.

    There are currently 4 scenarios for sending email notification when a change is made to a repository:
    1. An admin user elects to receive email when the first change set is produced for a new repository
       from User Preferences.  The change set does not have to include any valid content.  This allows for
       the capture of inappropriate content being uploaded to new repositories.
    2. A regular user elects to receive email when the first valid change set is produced for a new repository
       from User Preferences.  This differs from 1 above in that the user will not receive email until a
       change set that includes valid content is produced.
    3. An admin user checks the "Receive email alerts" check box on the manage repository page.  Since the
       user is an admin user, the email will include information about both HTML and image content that was
       included in the change set.
    4. A regular user checks the "Receive email alerts" check box on the manage repository page.  Since the
       user is not an admin user, the email will not include any information about both HTML and image content
       that was included in the change set.

    :param app: the tool shed application object
    :param host: the hostname (optionally host:port) used to build the From address
    :param repository: the repository whose tip changeset triggered the alert
    :param content_alert_str: admin-only warning text about HTML/image content in the changeset
    :param new_repo_alert: True when this is the first changeset of a new repository
    :param admin_only: when sending new-repo alerts, restrict recipients to admin users
    """
    sa_session = app.model.context.current
    repo = hg_util.get_repo_for_repository(app,
                                           repository=repository,
                                           repo_path=None,
                                           create=False)
    sharable_link = tool_shed.util.repository_util.generate_sharable_link_for_repository_in_tool_shed(
        repository, changeset_revision=None)
    smtp_server = app.config.smtp_server
    # Without an SMTP server, or with nobody subscribed, there is nothing to do.
    if smtp_server and (new_repo_alert or repository.email_alerts):
        # Send email alert to users that want them.
        if app.config.email_from is not None:
            email_from = app.config.email_from
        elif host.split(':')[0] in ['localhost', '127.0.0.1', '0.0.0.0']:
            email_from = 'galaxy-no-reply@' + socket.getfqdn()
        else:
            email_from = 'galaxy-no-reply@' + host.split(':')[0]
        tip_changeset = repo.changelog.tip()
        ctx = repo.changectx(tip_changeset)
        try:
            # Mercurial usernames are conventionally "Name <email>"; take the first token.
            username = ctx.user().split()[0]
        except Exception:
            username = ctx.user()
        # We'll use 2 template bodies because we only want to send content
        # alerts to tool shed admin users.
        if new_repo_alert:
            template = new_repo_email_alert_template
        else:
            template = email_alert_template
        display_date = hg_util.get_readable_ctx_date(ctx)
        # Admin body includes the content alert; regular body omits it.
        admin_body = string.Template(template).safe_substitute(
            host=host,
            sharable_link=sharable_link,
            repository_name=repository.name,
            revision='%s:%s' % (str(ctx.rev()), ctx),
            display_date=display_date,
            description=ctx.description(),
            username=username,
            content_alert_str=content_alert_str)
        body = string.Template(template).safe_substitute(
            host=host,
            sharable_link=sharable_link,
            repository_name=repository.name,
            revision='%s:%s' % (str(ctx.rev()), ctx),
            display_date=display_date,
            description=ctx.description(),
            username=username,
            content_alert_str='')
        admin_users = app.config.get("admin_users", "").split(",")
        frm = email_from
        if new_repo_alert:
            subject = "Galaxy tool shed alert for new repository named %s" % str(
                repository.name)
            subject = subject[:80]
            email_alerts = []
            # Recipients are users who opted in to new-repository alerts in User Preferences.
            for user in sa_session.query( app.model.User ) \
                                  .filter( and_( app.model.User.table.c.deleted == false(),
                                                 app.model.User.table.c.new_repo_alert == true() ) ):
                if admin_only:
                    if user.email in admin_users:
                        email_alerts.append(user.email)
                else:
                    email_alerts.append(user.email)
        else:
            subject = "Galaxy tool shed update alert for repository named %s" % str(
                repository.name)
            # Per-repository subscribers are stored as a JSON list on the repository record.
            email_alerts = json.loads(repository.email_alerts)
        for email in email_alerts:
            to = email.strip()
            # Send it
            try:
                if to in admin_users:
                    util.send_mail(frm, to, subject, admin_body, app.config)
                else:
                    util.send_mail(frm, to, subject, body, app.config)
            except Exception:
                log.exception(
                    "An error occurred sending a tool shed repository update alert by email."
                )
Ejemplo n.º 53
0
    def repository_dependencies(self, trans, id, **kwd):
        """
        GET /api/repository_revisions/{encoded repository_metadata id}/repository_dependencies

        Returns a list of dictionaries that each define a specific downloadable revision of a
        repository in the Tool Shed.  This method returns dictionaries with more information in
        them than other methods in this controller.  The information about repository_metdata is
        enhanced to include information about the repository (e.g., name, owner, etc) associated
        with the repository_metadata record.

        :param id: the encoded id of the `RepositoryMetadata` object
        """
        # Example URL: http://localhost:9009/api/repository_revisions/repository_dependencies/bb125606ff9ea620
        repository_dependencies_dicts = []
        repository_metadata = metadata_util.get_repository_metadata_by_id(
            trans.app, id)
        if repository_metadata is None:
            log.debug('Invalid repository_metadata id received: %s' % str(id))
            return repository_dependencies_dicts
        metadata = repository_metadata.metadata
        if metadata is None:
            log.debug(
                'The repository_metadata record with id %s has no metadata.' %
                str(id))
            return repository_dependencies_dicts
        if 'repository_dependencies' in metadata:
            rd_tups = metadata['repository_dependencies'][
                'repository_dependencies']
            for rd_tup in rd_tups:
                # Each dependency tuple begins with (tool_shed, name, owner, changeset_revision).
                tool_shed, name, owner, changeset_revision = rd_tup[0:4]
                repository_dependency = suc.get_repository_by_name_and_owner(
                    trans.app, name, owner)
                if repository_dependency is None:
                    # Fixed: was log.dbug, which would raise AttributeError here.
                    log.debug(
                        'Cannot locate repository dependency %s owned by %s.' %
                        (name, owner))
                    continue
                repository_dependency_id = trans.security.encode_id(
                    repository_dependency.id)
                repository_dependency_repository_metadata = \
                    suc.get_repository_metadata_by_changeset_revision( trans.app, repository_dependency_id, changeset_revision )
                if repository_dependency_repository_metadata is None:
                    # The changeset_revision column in the repository_metadata table has been updated with a new
                    # value, so find the changeset_revision to which we need to update.
                    repo = hg_util.get_repo_for_repository(
                        trans.app,
                        repository=repository_dependency,
                        repo_path=None,
                        create=False)
                    new_changeset_revision = suc.get_next_downloadable_changeset_revision(
                        repository_dependency, repo, changeset_revision)
                    repository_dependency_repository_metadata = \
                        suc.get_repository_metadata_by_changeset_revision( trans.app,
                                                                           repository_dependency_id,
                                                                           new_changeset_revision )
                    if repository_dependency_repository_metadata is None:
                        decoded_repository_dependency_id = trans.security.decode_id(
                            repository_dependency_id)
                        debug_msg = 'Cannot locate repository_metadata with id %d for repository dependency %s owned by %s ' % \
                            ( decoded_repository_dependency_id, str( name ), str( owner ) )
                        debug_msg += 'using either of these changeset_revisions: %s, %s.' % \
                            ( str( changeset_revision ), str( new_changeset_revision ) )
                        log.debug(debug_msg)
                        continue
                    else:
                        changeset_revision = new_changeset_revision
                repository_dependency_metadata_dict = \
                    repository_dependency_repository_metadata.to_dict( view='element',
                                                                       value_mapper=self.__get_value_mapper( trans ) )
                repository_dependency_dict = repository_dependency.to_dict(
                    view='element',
                    value_mapper=self.__get_value_mapper(trans))
                # We need to be careful with the entries in our repository_dependency_dict here since this Tool Shed API
                # controller is working with repository_metadata records.  The above to_dict() method returns a dictionary
                # with an id entry for the repository record.  However, all of the other methods in this controller have
                # the id entry associated with a repository_metadata record id.  To avoid confusion, we'll update the
                # repository_dependency_metadata_dict with entries from the repository_dependency_dict without using the
                # Python dictionary update() method because we do not want to overwrite existing entries.
                for k, v in repository_dependency_dict.items():
                    if k not in repository_dependency_metadata_dict:
                        repository_dependency_metadata_dict[k] = v
                repository_dependency_metadata_dict['url'] = web.url_for(
                    controller='repositories',
                    action='show',
                    id=repository_dependency_id)
                repository_dependencies_dicts.append(
                    repository_dependency_metadata_dict)
        return repository_dependencies_dicts
def handle_directory_changes(app, host, username, repository, full_path,
                             filenames_in_archive,
                             remove_repo_files_not_in_tar, new_repo_alert,
                             commit_message, undesirable_dirs_removed,
                             undesirable_files_removed):
    """
    Reconcile an extracted upload with the repository working directory and commit it.

    Optionally removes repository files that are absent from the uploaded archive,
    scans uploaded files for content that should trigger admin email alerts, adds
    and commits the files via mercurial, and sends any configured email alerts.

    :returns: a 6-tuple of (ok, error_message, files_to_remove, content_alert_str,
              undesirable_dirs_removed, undesirable_files_removed)
    """
    repo = hg_util.get_repo_for_repository(app,
                                           repository=repository,
                                           repo_path=None,
                                           create=False)
    content_alert_str = ''
    files_to_remove = []
    # Convert archive-relative names to absolute paths within the repository.
    filenames_in_archive = [
        os.path.join(full_path, name) for name in filenames_in_archive
    ]
    if remove_repo_files_not_in_tar and not repository.is_new(app):
        # We have a repository that is not new (it contains files), so discover those files that are in the
        # repository, but not in the uploaded archive.
        for root, dirs, files in os.walk(full_path):
            if root.find('.hg') < 0 and root.find('hgrc') < 0:
                for undesirable_dir in UNDESIRABLE_DIRS:
                    if undesirable_dir in dirs:
                        dirs.remove(undesirable_dir)
                        undesirable_dirs_removed += 1
                for undesirable_file in UNDESIRABLE_FILES:
                    if undesirable_file in files:
                        files.remove(undesirable_file)
                        undesirable_files_removed += 1
                for name in files:
                    full_name = os.path.join(root, name)
                    if full_name not in filenames_in_archive:
                        files_to_remove.append(full_name)
        for repo_file in files_to_remove:
            # Remove files in the repository (relative to the upload point) that are not in
            # the uploaded archive.
            try:
                hg_util.remove_file(repo.ui, repo, repo_file, force=True)
            except Exception as e:
                log.debug(
                    "Error removing files using the mercurial API, so trying a different approach, the error was: %s"
                    % str(e))
                # Fall back to manipulating the dirstate and filesystem directly.
                relative_selected_file = repo_file.split(
                    'repo_%d' % repository.id)[1].lstrip('/')
                repo.dirstate.remove(relative_selected_file)
                repo.dirstate.write()
                absolute_selected_file = os.path.abspath(repo_file)
                if os.path.isdir(absolute_selected_file):
                    try:
                        os.rmdir(absolute_selected_file)
                    except OSError:
                        # The directory is not empty.
                        pass
                elif os.path.isfile(absolute_selected_file):
                    os.remove(absolute_selected_file)
                    # Renamed from `dir` to avoid shadowing the builtin.
                    parent_dir = os.path.split(absolute_selected_file)[0]
                    try:
                        os.rmdir(parent_dir)
                    except OSError:
                        # The directory is not empty.
                        pass
    # See if any admin users have chosen to receive email alerts when a repository is updated.
    # If so, check every uploaded file to ensure content is appropriate.
    check_contents = check_file_contents_for_email_alerts(app)
    for filename_in_archive in filenames_in_archive:
        # Check file content to ensure it is appropriate.
        if check_contents and os.path.isfile(filename_in_archive):
            content_alert_str += check_file_content_for_html_and_images(
                filename_in_archive)
        hg_util.add_changeset(repo.ui, repo, filename_in_archive)
        if filename_in_archive.endswith('tool_data_table_conf.xml.sample'):
            # Handle the special case where a tool_data_table_conf.xml.sample file is being uploaded
            # by parsing the file and adding new entries to the in-memory app.tool_data_tables
            # dictionary.
            stdtm = ShedToolDataTableManager(app)
            error, message = stdtm.handle_sample_tool_data_table_conf_file(
                filename_in_archive, persist=False)
            if error:
                return False, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed
    hg_util.commit_changeset(repo.ui,
                             repo,
                             full_path_to_changeset=full_path,
                             username=username,
                             message=commit_message)
    # Content alerts go to all subscribers only when this is the first downloadable revision.
    admin_only = len(repository.downloadable_revisions) != 1
    suc.handle_email_alerts(app,
                            host,
                            repository,
                            content_alert_str=content_alert_str,
                            new_repo_alert=new_repo_alert,
                            admin_only=admin_only)
    return True, '', files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed
def check_and_update_repository_metadata( app, info_only=False, verbosity=1 ):
    """
    This method will iterate through all records in the repository_metadata
    table, checking each one for tool metadata, then checking the tool
    metadata for tests.  Each tool's metadata should look something like:
    {
      "add_to_tool_panel": true,
      "description": "",
      "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3",
      "id": "tool_wrapper",
      "name": "Map with Tool Wrapper",
      "requirements": [],
      "tests": [],
      "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml",
      "tool_type": "default",
      "version": "1.2.3",
      "version_string_cmd": null
    }
    If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository)
    not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install
    and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
    not to be tested.
    """
    start = time.time()
    skip_metadata_ids = []
    checked_repository_ids = []
    tool_count = 0
    has_tests = 0
    no_tests = 0
    valid_revisions = 0
    invalid_revisions = 0
    records_checked = 0
    # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
    print '# -------------------------------------------------------------------------------------------'
    print '# The skip_tool_test setting has been set for the following repository revision, so they will not be tested.'
    skip_metadata_ids = []
    for skip_tool_test in app.sa_session.query( app.model.SkipToolTest ):
        print '# repository_metadata_id: %s, changeset_revision: %s' % \
            ( str( skip_tool_test.repository_metadata_id ), str( skip_tool_test.initial_changeset_revision ) )
        print 'reason: %s' % str( skip_tool_test.comment )
        skip_metadata_ids.append( skip_tool_test.repository_metadata_id )
    # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
    # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
    # because it's redundant to test a revision that a user can't install.
    for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \
                                             .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
                                                            app.model.RepositoryMetadata.table.c.includes_tools == True,
                                                            app.model.RepositoryMetadata.table.c.do_not_test == False ) ):
        # Initialize some items.
        missing_test_components = []
        revision_has_test_data = False
        testable_revision = False
        repository = repository_metadata.repository
        records_checked += 1
        # Check the next repository revision.
        changeset_revision = str( repository_metadata.changeset_revision )
        name = repository.name
        owner = repository.user.username
        metadata = repository_metadata.metadata
        repository = repository_metadata.repository
        if repository.id not in checked_repository_ids:
            checked_repository_ids.append( repository.id )
        print '# -------------------------------------------------------------------------------------------'
        print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner )
        if repository_metadata.id in skip_metadata_ids:
            print'# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.' % ( changeset_revision, name, owner )
            continue
        # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
        # only repositories that contain tools.
        tool_dicts = metadata.get( 'tools', None )
        if tool_dicts is not None:
            # Clone the repository up to the changeset revision we're checking.
            repo_dir = repository.repo_path( app )
            hg_util.get_repo_for_repository( app, repository=None, repo_path=repo_dir, create=False )
            work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-cafr"  )
            cloned_ok, error_message = hg_util.clone_repository( repo_dir, work_dir, changeset_revision )
            if cloned_ok:
                # Iterate through all the directories in the cloned changeset revision and determine whether there's a
                # directory named test-data. If this directory is not present update the metadata record for the changeset
                # revision we're checking.
                for root, dirs, files in os.walk( work_dir ):
                    if '.hg' in dirs:
                        dirs.remove( '.hg' )
                    if 'test-data' in dirs:
                        revision_has_test_data = True
                        test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] )
                        break
            if revision_has_test_data:
                print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
            else:
                print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
            print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
                ( changeset_revision, name, owner )
            # Inspect each tool_dict for defined functional tests.  If there
            # are no tests, this tool should not be tested, since the tool
            # functional tests only report failure if the test itself fails,
            # not if it's missing or undefined. Filtering out those
            # repositories at this step will reduce the number of "false
            # negatives" the automated functional test framework produces.
            for tool_dict in tool_dicts:
                failure_reason = ''
                problem_found = False
                tool_has_defined_tests = False
                tool_has_test_files = False
                missing_test_files = []
                tool_count += 1
                tool_id = tool_dict[ 'id' ]
                tool_version = tool_dict[ 'version' ]
                tool_guid = tool_dict[ 'guid' ]
                if verbosity >= 1:
                    print "# Checking tool ID '%s' in changeset revision %s of %s." % ( tool_id, changeset_revision, name )
                defined_test_dicts = tool_dict.get( 'tests', None )
                if defined_test_dicts is not None:
                    # We need to inspect the <test> tags because the following tags...
                    # <tests>
                    # </tests>
                    # ...will produce the following metadata:
                    # "tests": []
                    # And the following tags...
                    # <tests>
                    #     <test>
                    #    </test>
                    # </tests>
                    # ...will produce the following metadata:
                    # "tests":
                    #    [{"inputs": [], "name": "Test-1", "outputs": [], "required_files": []}]
                    for defined_test_dict in defined_test_dicts:
                        inputs = defined_test_dict.get( 'inputs', [] )
                        outputs = defined_test_dict.get( 'outputs', [] )
                        if inputs and outputs:
                            # At least one tool within the repository has a valid <test> tag.
                            tool_has_defined_tests = True
                            break
                if tool_has_defined_tests:
                    print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
                        ( tool_id, changeset_revision, name )
                    has_tests += 1
                else:
                    print '# No functional tests defined for %s.' % tool_id
                    no_tests += 1
                if tool_has_defined_tests and revision_has_test_data:
                    missing_test_files = check_for_missing_test_files( defined_test_dicts, test_data_path )
                    if missing_test_files:
                        print "# Tool id '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
                            ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
                    else:
                        tool_has_test_files = True
                if not revision_has_test_data:
                    failure_reason += 'Repository does not have a test-data directory. '
                    problem_found = True
                if not tool_has_defined_tests:
                    failure_reason += 'Functional test definitions missing for %s. ' % tool_id
                    problem_found = True
                if missing_test_files:
                    failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) )
                    problem_found = True
                test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid, missing_components=failure_reason )
                # Only append this error dict if it hasn't already been added.
                if problem_found:
                    if test_errors not in missing_test_components:
                        missing_test_components.append( test_errors )
                if tool_has_defined_tests and tool_has_test_files:
                    print '# Revision %s of %s owned by %s is a testable revision.' % ( changeset_revision, name, owner )
                    testable_revision = True
            # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
            if os.path.exists( work_dir ):
                shutil.rmtree( work_dir )
            if not missing_test_components:
                valid_revisions += 1
                print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
            else:
                invalid_revisions += 1
                print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
                if verbosity >= 1:
                    for missing_test_component in missing_test_components:
                        if 'missing_components' in missing_test_component:
                            print '# %s' % missing_test_component[ 'missing_components' ]
            if not info_only:
                # Get or create the list of tool_test_results dictionaries.
                if repository_metadata.tool_test_results is not None:
                    # We'll listify the column value in case it uses the old approach of storing the results of only a single test run.
                    tool_test_results_dicts = listify( repository_metadata.tool_test_results )
                else:
                    tool_test_results_dicts = []
                if tool_test_results_dicts:
                    # Inspect the tool_test_results_dict for the last test run in case it contains only a test_environment
                    # entry.  This will occur with multiple runs of this script without running the associated
                    # install_and_test_tool_sed_repositories.sh script which will further populate the tool_test_results_dict.
                    tool_test_results_dict = tool_test_results_dicts[ 0 ]
                    if len( tool_test_results_dict ) <= 1:
                        # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
                        # a test_environment entry.  If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
                        # since it will be re-inserted later.
                        tool_test_results_dict = tool_test_results_dicts.pop( 0 )
                    elif (len( tool_test_results_dict ) == 2 and
                          'test_environment' in tool_test_results_dict and 'missing_test_components' in tool_test_results_dict):
                        # We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components".
                        # In this case, some tools are missing tests components while others are not.
                        tool_test_results_dict = tool_test_results_dicts.pop( 0 )
                    else:
                        # The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
                        tool_test_results_dict = {}
                else:
                    # Create a new dictionary for the most recent test run.
                    tool_test_results_dict = {}
                test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
                # Add the current time as the approximate time that this test run occurs.  A similar value will also be
                # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
                # may be configured to store multiple test run results, so each must be associated with a time stamp.
                now = time.strftime( "%Y-%m-%d %H:%M:%S" )
                test_environment_dict[ 'time_tested' ] = now
                test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app )
                test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
                test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
                tool_test_results_dict[ 'test_environment' ] = test_environment_dict
                # The repository_metadata.time_last_tested column is not changed by this script since no testing is performed here.
                if missing_test_components:
                    # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
                    # found in this revision, and:
                    # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
                    #    In this case, the revision will never be updated with the missing components, and re-testing it would be redundant.
                    # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable
                    #    revision. In this case, if the repository is updated with test data or functional tests, the downloadable
                    #    changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable
                    #    changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
                    #    In the install and test script, this behavior is slightly different, since we do want to always run functional
                    #    tests on the most recent downloadable changeset revision.
                    if should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ):
                        print "# Setting do_not_test to True on revision %s of %s owned by %s because it is missing test components" % (
                            changeset_revision, name, owner
                        )
                        print "# and it is not the latest downloadable revision."
                        repository_metadata.do_not_test = True
                    if not testable_revision:
                        # Even though some tools may be missing test components, it may be possible to test other tools.  Since the
                        # install and test framework filters out repositories marked as missing test components, we'll set it only if
                        # no tools can be tested.
                        print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % (
                            changeset_revision, name, owner
                        )
                        repository_metadata.missing_test_components = True
                        print "# Setting tools_functionally_correct to False on revision %s of %s owned by %s because it is missing test components" % (
                            changeset_revision, name, owner
                        )
                        repository_metadata.tools_functionally_correct = False
                    tool_test_results_dict[ 'missing_test_components' ] = missing_test_components
                # Store only the configured number of test runs.
                num_tool_test_results_saved = int( app.config.num_tool_test_results_saved )
                if len( tool_test_results_dicts ) >= num_tool_test_results_saved:
                    test_results_index = num_tool_test_results_saved - 1
                    new_tool_test_results_dicts = tool_test_results_dicts[ :test_results_index ]
                else:
                    new_tool_test_results_dicts = [ d for d in tool_test_results_dicts ]
                # Insert the new element into the first position in the list.
                new_tool_test_results_dicts.insert( 0, tool_test_results_dict )
                repository_metadata.tool_test_results = new_tool_test_results_dicts
                app.sa_session.add( repository_metadata )
                app.sa_session.flush()
    stop = time.time()
    print '# -------------------------------------------------------------------------------------------'
    print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, records_checked )
    print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
    print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
    print '# Found %d tools without functional tests.' % no_tests
    print '# Found %d tools with functional tests.' % has_tests
    if info_only:
        print '# Database not updated, info_only set.'
    print "# Elapsed time: ", stop - start
    print "#############################################################################"
Ejemplo n.º 56
0
 def allow_push(self, app):
     """Return the hgweb 'allow_push' config value (comma-separated usernames) for this repository."""
     repository_repo = hg_util.get_repo_for_repository(app, repository=self)
     ui_config = repository_repo.ui
     return ui_config.config('web', 'allow_push')
Ejemplo n.º 57
0
 def handle_elem( self, elem ):
     """Populate or unpopulate the changeset_revision and toolshed attributes of repository tags.

     Returns a 3-tuple ( altered, elem, error_message ):
     altered is True if the tag was changed, elem is the (possibly replacement)
     element, and error_message is non-empty on failure.
     """
     # <repository name="molecule_datatypes" owner="test" changeset_revision="1a070566e9c6" />
     # <repository changeset_revision="xxx" name="package_xorg_macros_1_17_1" owner="test" toolshed="yyy">
     #    <package name="xorg_macros" version="1.17.1" />
     # </repository>
     error_message = ''
     name = elem.get( 'name' )
     owner = elem.get( 'owner' )
     # The name and owner attributes are always required, so if either are missing, return the error message.
     if not name or not owner:
         error_message = self.check_tag_attributes( elem )
         return False, elem, error_message
     altered = False
     toolshed = elem.get( 'toolshed' )
     changeset_revision = elem.get( 'changeset_revision' )
     # Over a short period of time a bug existed which caused the prior_installation_required attribute
     # to be set to False and included in the <repository> tag when a repository was exported along with
     # its dependencies.  The following will eliminate this problematic attribute upon import.
     prior_installation_required = elem.get( 'prior_installation_required' )
     if prior_installation_required is not None and not asbool( prior_installation_required ):
         del elem.attrib[ 'prior_installation_required' ]
     sub_elems = [ child_elem for child_elem in list( elem ) ]
     if len( sub_elems ) > 0:
         # At this point, a <repository> tag will point only to a package.
         # <package name="xorg_macros" version="1.17.1" />
         # Coerce the list to an odict().
         sub_elements = odict()
         packages = []
         for sub_elem in sub_elems:
             sub_elem_type = sub_elem.tag
             sub_elem_name = sub_elem.get( 'name' )
             sub_elem_version = sub_elem.get( 'version' )
             if sub_elem_type and sub_elem_name and sub_elem_version:
                 packages.append( ( sub_elem_name, sub_elem_version ) )
         sub_elements[ 'packages' ] = packages
     else:
         # Set to None.
         sub_elements = None
     if self.unpopulate:
         # We're exporting the repository, so eliminate all toolshed and changeset_revision attributes
         # from the <repository> tag.
         if toolshed or changeset_revision:
             attributes = odict()
             attributes[ 'name' ] = name
             attributes[ 'owner' ] = owner
             prior_installation_required = elem.get( 'prior_installation_required' )
             if prior_installation_required is not None and asbool( prior_installation_required ):
                 attributes[ 'prior_installation_required' ] = 'True'
             new_elem = xml_util.create_element( 'repository', attributes=attributes, sub_elements=sub_elements )
             return True, new_elem, error_message
         # Nothing to strip from the tag, so return the original element unchanged.
         # Previously this path returned new_elem, which was unbound here and raised
         # an UnboundLocalError at runtime.
         return altered, elem, error_message
     # From here on we're populating the toolshed and changeset_revision attributes if necessary.
     if not toolshed:
         # Default the setting to the current tool shed.
         toolshed = str( url_for( '/', qualified=True ) ).rstrip( '/' )
         elem.attrib[ 'toolshed' ] = toolshed
         altered = True
     if not changeset_revision:
         # Populate the changeset_revision attribute with the latest installable metadata revision for
         # the defined repository.  We use the latest installable revision instead of the latest metadata
         # revision to ensure that the contents of the revision are valid.
         repository = suc.get_repository_by_name_and_owner( self.app, name, owner )
         if repository:
             repo = hg_util.get_repo_for_repository( self.app,
                                                     repository=repository,
                                                     repo_path=None,
                                                     create=False )
             latest_installable_changeset_revision = \
                 suc.get_latest_downloadable_changeset_revision( self.app, repository, repo )
             if latest_installable_changeset_revision != hg_util.INITIAL_CHANGELOG_HASH:
                 elem.attrib[ 'changeset_revision' ] = latest_installable_changeset_revision
                 altered = True
             else:
                 error_message = 'Invalid latest installable changeset_revision %s ' % \
                     str( latest_installable_changeset_revision )
                 error_message += 'retrieved for repository %s owned by %s.  ' % ( str( name ), str( owner ) )
         else:
             error_message = 'Unable to locate repository with name %s and owner %s.  ' % ( str( name ), str( owner ) )
     return altered, elem, error_message
Ejemplo n.º 58
0
 def is_new(self, app):
     """Return True when the repository's changelog is empty (tip revision is the null rev)."""
     repository_repo = hg_util.get_repo_for_repository(app, repository=self)
     tip_node = repository_repo.changelog.tip()
     tip_context = repository_repo.changectx(tip_node)
     return tip_context.rev() < 0