def upload( self, trans, **kwd ):
    message = kwd.get( 'message', '' )
    status = kwd.get( 'status', 'done' )
    commit_message = kwd.get( 'commit_message', 'Uploaded' )
    category_ids = util.listify( kwd.get( 'category_id', '' ) )
    categories = suc.get_categories( trans )
    repository_id = kwd.get( 'repository_id', '' )
    repository = suc.get_repository_in_tool_shed( trans, repository_id )
    repo_dir = repository.repo_path( trans.app )
    repo = hg.repository( suc.get_configured_ui(), repo_dir )
    uncompress_file = util.string_as_bool( kwd.get( 'uncompress_file', 'true' ) )
    remove_repo_files_not_in_tar = util.string_as_bool( kwd.get( 'remove_repo_files_not_in_tar', 'true' ) )
    uploaded_file = None
    upload_point = commit_util.get_upload_point( repository, **kwd )
    tip = repository.tip( trans.app )
    file_data = kwd.get( 'file_data', '' )
    url = kwd.get( 'url', '' )
    # Part of the upload process is sending email notification to those that have registered to
    # receive them.  One scenario occurs when the first change set is produced for the repository.
    # See the suc.handle_email_alerts() method for the definition of the scenarios.
    new_repo_alert = repository.is_new( trans.app )
    uploaded_directory = None
    if kwd.get( 'upload_button', False ):
        if file_data == '' and url == '':
            message = 'No files were entered on the upload form.'
            status = 'error'
            uploaded_file = None
        elif url and url.startswith( 'hg' ):
            # Use mercurial clone to fetch repository, contents will then be copied over.
            uploaded_directory = tempfile.mkdtemp()
            repo_url = 'http%s' % url[ len( 'hg' ): ]
            repo_url = repo_url.encode( 'ascii', 'replace' )
            try:
                commands.clone( suc.get_configured_ui(), repo_url, uploaded_directory )
            except Exception, e:
                message = 'Error uploading via mercurial clone: %s' % suc.to_html_string( str( e ) )
                status = 'error'
                suc.remove_dir( uploaded_directory )
                uploaded_directory = None
        elif url:
            valid_url = True
            try:
                stream = urllib.urlopen( url )
            except Exception, e:
                valid_url = False
                message = 'Error uploading file via http: %s' % str( e )
                status = 'error'
                uploaded_file = None
            if valid_url:
                fd, uploaded_file_name = tempfile.mkstemp()
                uploaded_file = open( uploaded_file_name, 'wb' )
                while 1:
                    chunk = stream.read( util.CHUNK_SIZE )
                    if not chunk:
                        break
                    uploaded_file.write( chunk )
                uploaded_file.flush()
                uploaded_file_filename = url.split( '/' )[ -1 ]
                isempty = os.path.getsize( os.path.abspath( uploaded_file_name ) ) == 0
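# A minimal sketch (not part of the controller above) illustrating the URL convention the
# upload handler assumes: a URL whose scheme starts with "hg" is fetched with a mercurial
# clone after the "hg" prefix is replaced with "http", so 'hg://...' becomes 'http://...'
# and 'hgs://...' becomes 'https://...'.  The sample URLs are hypothetical.
for example_url in [ 'hg://toolshed.example.org/repos/test/add_column',
                     'hgs://toolshed.example.org/repos/test/add_column' ]:
    rewritten = 'http%s' % example_url[ len( 'hg' ): ]
    print rewritten  # prints the http:// and https:// forms respectively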
def get_repo_info_dicts( trans, tool_shed_url, repository_id, changeset_revision ):
    repository = suc.get_repository_in_tool_shed( trans, repository_id )
    repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
    # Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
    repository_dependencies = \
        repository_dependency_util.get_repository_dependencies_for_changeset_revision( trans=trans,
                                                                                       repository=repository,
                                                                                       repository_metadata=repository_metadata,
                                                                                       toolshed_base_url=str( web.url_for( '/', qualified=True ) ).rstrip( '/' ),
                                                                                       key_rd_dicts_to_be_processed=None,
                                                                                       all_repository_dependencies=None,
                                                                                       handled_key_rd_dicts=None )
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    ctx = suc.get_changectx_for_changeset( repo, changeset_revision )
    repo_info_dict = {}
    # Cast unicode to string.
    repo_info_dict[ str( repository.name ) ] = ( str( repository.description ),
                                                 suc.generate_clone_url_for_repository_in_tool_shed( trans, repository ),
                                                 str( changeset_revision ),
                                                 str( ctx.rev() ),
                                                 str( repository.user.username ),
                                                 repository_dependencies,
                                                 None )
    all_required_repo_info_dict = common_install_util.get_required_repo_info_dicts( trans, tool_shed_url, [ repo_info_dict ] )
    all_repo_info_dicts = all_required_repo_info_dict.get( 'all_repo_info_dicts', [] )
    return all_repo_info_dicts
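# Hypothetical illustration of the repo_info_dict structure built above, mirroring the
# example in the get_repository_revision_install_info docstring below: the repository name
# maps to a tuple of ( description, clone URL, changeset hash, numeric rev, owner,
# repository_dependencies, tool dependencies ).  All values here are made up.
example_repo_info_dict = {
    'add_column': ( 'add_column hello',
                    'http://test@localhost:9009/repos/test/add_column',
                    '3a08cc21466f',
                    '1',
                    'test',
                    {},
                    None ),
}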
def get_ordered_installable_revisions( self, trans, name, owner, **kwd ):
    """
    GET /api/repositories/get_ordered_installable_revisions

    :param name: the name of the Repository
    :param owner: the owner of the Repository

    Returns the ordered list of changeset revision hash strings that are associated with installable revisions.
    As in the changelog, the list is ordered oldest to newest.
    """
    # Example URL: http://localhost:9009/api/repositories/get_ordered_installable_revisions?name=add_column&owner=test
    if name and owner:
        # Get the repository information.
        repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
        if repository is None:
            error_message = "Error in the Tool Shed repositories API in get_ordered_installable_revisions: "
            error_message += "cannot locate repository %s owned by %s." % ( str( name ), str( owner ) )
            log.debug( error_message )
            return []
        repo_dir = repository.repo_path( trans.app )
        repo = hg.repository( suc.get_configured_ui(), repo_dir )
        ordered_installable_revisions = suc.get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
        return ordered_installable_revisions
    else:
        error_message = "Error in the Tool Shed repositories API in get_ordered_installable_revisions: "
        error_message += "invalid name %s or owner %s received." % ( str( name ), str( owner ) )
        log.debug( error_message )
        return []
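# A minimal sketch of calling this endpoint from outside the Tool Shed, assuming a local
# Tool Shed at localhost:9009 and the hypothetical repository add_column owned by test.
import json
import urllib2

url = 'http://localhost:9009/api/repositories/get_ordered_installable_revisions?name=add_column&owner=test'
revisions = json.loads( urllib2.urlopen( url ).read() )
print revisions  # e.g. [ '3a08cc21466f', ... ], ordered oldest to newest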
def upload_directory( self, trans, repository, uploaded_directory, upload_point, remove_repo_files_not_in_tar, commit_message, new_repo_alert ):
    repo_dir = repository.repo_path( trans.app )
    repo = hg.repository( suc.get_configured_ui(), repo_dir )
    undesirable_dirs_removed = 0
    undesirable_files_removed = 0
    if upload_point is not None:
        full_path = os.path.abspath( os.path.join( repo_dir, upload_point ) )
    else:
        full_path = os.path.abspath( repo_dir )
    filenames_in_archive = []
    for root, dirs, files in os.walk( uploaded_directory ):
        for uploaded_file in files:
            relative_path = os.path.normpath( os.path.join( os.path.relpath( root, uploaded_directory ), uploaded_file ) )
            if repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION:
                ok = os.path.basename( uploaded_file ) == suc.TOOL_DEPENDENCY_DEFINITION_FILENAME
            else:
                ok = os.path.basename( uploaded_file ) not in commit_util.UNDESIRABLE_FILES
            if ok:
                for file_path_item in relative_path.split( '/' ):
                    if file_path_item in commit_util.UNDESIRABLE_DIRS:
                        undesirable_dirs_removed += 1
                        ok = False
                        break
            else:
                undesirable_files_removed += 1
            if ok:
                uploaded_file_name = os.path.abspath( os.path.join( root, uploaded_file ) )
                if os.path.split( uploaded_file_name )[ -1 ] == suc.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
                    # Inspect the contents of the file to see if changeset_revision values are missing and if so, set them appropriately.
                    altered, root_elem, error_message = commit_util.handle_repository_dependencies_definition( trans,
                                                                                                               uploaded_file_name,
                                                                                                               unpopulate=False )
                    if error_message:
                        return False, error_message, [], '', [], []
                    elif altered:
                        tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                        shutil.move( tmp_filename, uploaded_file_name )
                elif os.path.split( uploaded_file_name )[ -1 ] == suc.TOOL_DEPENDENCY_DEFINITION_FILENAME:
                    # Inspect the contents of the file to see if changeset_revision values are missing and if so, set them appropriately.
                    altered, root_elem, error_message = commit_util.handle_tool_dependencies_definition( trans, uploaded_file_name )
                    if error_message:
                        return False, error_message, [], '', [], []
                    if altered:
                        tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                        shutil.move( tmp_filename, uploaded_file_name )
                repo_path = os.path.join( full_path, relative_path )
                repo_basedir = os.path.normpath( os.path.join( repo_path, os.path.pardir ) )
                if not os.path.exists( repo_basedir ):
                    os.makedirs( repo_basedir )
                if os.path.exists( repo_path ):
                    if os.path.isdir( repo_path ):
                        shutil.rmtree( repo_path )
                    else:
                        os.remove( repo_path )
                shutil.move( os.path.join( uploaded_directory, relative_path ), repo_path )
                filenames_in_archive.append( relative_path )
    return commit_util.handle_directory_changes( trans,
                                                 repository,
                                                 full_path,
                                                 filenames_in_archive,
                                                 remove_repo_files_not_in_tar,
                                                 new_repo_alert,
                                                 commit_message,
                                                 undesirable_dirs_removed,
                                                 undesirable_files_removed )
def has_previous_repository_reviews( trans, repository, changeset_revision ):
    """Determine if a repository has a changeset revision review prior to the received changeset revision."""
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
    for changeset in suc.reversed_upper_bounded_changelog( repo, changeset_revision ):
        previous_changeset_revision = str( repo.changectx( changeset ) )
        if previous_changeset_revision in reviewed_revision_hashes:
            return True
    return False
def build_readme_files_dict( trans, repository, changeset_revision, metadata, tool_path=None ):
    """
    Return a dictionary of valid readme file name <-> readme file content pairs for all readme files defined in the
    received metadata.  Since the received changeset_revision (which is associated with the received metadata) may
    not be the latest installable changeset revision, the README file contents may not be available on disk.  This
    method is used by both Galaxy and the Tool Shed.
    """
    if trans.webapp.name == 'galaxy':
        can_use_disk_files = True
    else:
        repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
        latest_downloadable_changeset_revision = suc.get_latest_downloadable_changeset_revision( trans, repository, repo )
        can_use_disk_files = changeset_revision == latest_downloadable_changeset_revision
    readme_files_dict = {}
    if metadata:
        if 'readme_files' in metadata:
            for relative_path_to_readme_file in metadata[ 'readme_files' ]:
                readme_file_name = os.path.split( relative_path_to_readme_file )[ 1 ]
                if can_use_disk_files:
                    if tool_path:
                        full_path_to_readme_file = os.path.abspath( os.path.join( tool_path, relative_path_to_readme_file ) )
                    else:
                        full_path_to_readme_file = os.path.abspath( relative_path_to_readme_file )
                    text = None
                    try:
                        f = open( full_path_to_readme_file, 'r' )
                        text = unicodify( f.read() )
                        f.close()
                    except Exception, e:
                        log.exception( "Error reading README file '%s' from disk: %s" % ( str( relative_path_to_readme_file ), str( e ) ) )
                        text = None
                    if text:
                        text_of_reasonable_length = suc.size_string( text )
                        if text_of_reasonable_length.find( '.. image:: ' ) >= 0:
                            # Handle image display for README files that are contained in repositories in the tool
                            # shed or installed into Galaxy.
                            lock = threading.Lock()
                            lock.acquire( True )
                            try:
                                text_of_reasonable_length = suc.set_image_paths( trans.app,
                                                                                 trans.security.encode_id( repository.id ),
                                                                                 text_of_reasonable_length )
                            except Exception, e:
                                log.exception( "Exception in build_readme_files_dict, so images may not be properly displayed:\n%s" % str( e ) )
                            finally:
                                lock.release()
                        if readme_file_name.endswith( '.rst' ):
                            text_of_reasonable_length = Template( rst_to_html( text_of_reasonable_length ),
                                                                  input_encoding='utf-8',
                                                                  output_encoding='utf-8',
                                                                  default_filters=[ 'decode.utf8' ],
                                                                  encoding_errors='replace' )
                            text_of_reasonable_length = text_of_reasonable_length.render( static_path=web.url_for( '/static' ),
                                                                                          host_url=web.url_for( '/', qualified=True ) )
                            text_of_reasonable_length = unicodify( text_of_reasonable_length )
                        else:
                            text_of_reasonable_length = suc.to_html_string( text_of_reasonable_length )
                        readme_files_dict[ readme_file_name ] = text_of_reasonable_length
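# Hypothetical shape of the dictionary returned by build_readme_files_dict, mapping each
# README file name defined in the metadata to its rendered contents; file names and
# contents here are made up.
example_readme_files_dict = {
    'readme.txt': 'Plain text contents converted to an HTML string...',
    'README.rst': '<p>reStructuredText contents rendered to HTML...</p>',
}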
def upload_tar( self, trans, repository, tar, uploaded_file, upload_point, remove_repo_files_not_in_tar, commit_message, new_repo_alert ):
    # Upload a tar archive of files.
    repo_dir = repository.repo_path( trans.app )
    repo = hg.repository( suc.get_configured_ui(), repo_dir )
    undesirable_dirs_removed = 0
    undesirable_files_removed = 0
    ok, message = commit_util.check_archive( repository, tar )
    if not ok:
        tar.close()
        uploaded_file.close()
        return ok, message, [], '', undesirable_dirs_removed, undesirable_files_removed
    else:
        if upload_point is not None:
            full_path = os.path.abspath( os.path.join( repo_dir, upload_point ) )
        else:
            full_path = os.path.abspath( repo_dir )
        filenames_in_archive = []
        for tarinfo_obj in tar.getmembers():
            ok = os.path.basename( tarinfo_obj.name ) not in commit_util.UNDESIRABLE_FILES
            if ok:
                for file_path_item in tarinfo_obj.name.split( '/' ):
                    if file_path_item in commit_util.UNDESIRABLE_DIRS:
                        undesirable_dirs_removed += 1
                        ok = False
                        break
            else:
                undesirable_files_removed += 1
            if ok:
                filenames_in_archive.append( tarinfo_obj.name )
        # Extract the uploaded tar to the upload_point within the repository hierarchy.
        tar.extractall( path=full_path )
        tar.close()
        uploaded_file.close()
        for filename in filenames_in_archive:
            uploaded_file_name = os.path.join( full_path, filename )
            if os.path.split( uploaded_file_name )[ -1 ] == suc.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
                # Inspect the contents of the file to see if changeset_revision values are missing and if so, set them appropriately.
                altered, root_elem = commit_util.handle_repository_dependencies_definition( trans, uploaded_file_name )
                if altered:
                    tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                    shutil.move( tmp_filename, uploaded_file_name )
            elif os.path.split( uploaded_file_name )[ -1 ] == suc.TOOL_DEPENDENCY_DEFINITION_FILENAME:
                # Inspect the contents of the file to see if changeset_revision values are missing and if so, set them appropriately.
                altered, root_elem = commit_util.handle_tool_dependencies_definition( trans, uploaded_file_name )
                if altered:
                    tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                    shutil.move( tmp_filename, uploaded_file_name )
        return commit_util.handle_directory_changes( trans,
                                                     repository,
                                                     full_path,
                                                     filenames_in_archive,
                                                     remove_repo_files_not_in_tar,
                                                     new_repo_alert,
                                                     commit_message,
                                                     undesirable_dirs_removed,
                                                     undesirable_files_removed )
def get_value( self, trans, grid, repository ):
    # Restrict to revisions that have been reviewed.
    if repository.reviews:
        rval = ''
        repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
        for review in repository.reviews:
            changeset_revision = review.changeset_revision
            rev, label = suc.get_rev_label_from_changeset_revision( repo, changeset_revision )
            rval += '<a href="manage_repository_reviews_of_revision?id=%s&changeset_revision=%s">%s</a><br/>' % \
                ( trans.security.encode_id( repository.id ), changeset_revision, label )
        return rval
    return ''
def browse_review( self, trans, **kwd ):
    message = kwd.get( 'message', '' )
    status = kwd.get( 'status', 'done' )
    review = review_util.get_review( trans, kwd[ 'id' ] )
    repository = review.repository
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    rev, changeset_revision_label = suc.get_rev_label_from_changeset_revision( repo, review.changeset_revision )
    return trans.fill_template( '/webapps/tool_shed/repository_review/browse_review.mako',
                                repository=repository,
                                changeset_revision_label=changeset_revision_label,
                                review=review,
                                message=message,
                                status=status )
def archive_repository_revision( trans, ui, repository, archive_dir, changeset_revision ):
    '''Create an un-versioned archive of a repository.'''
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    options_dict = suc.get_mercurial_default_options_dict( 'archive' )
    options_dict[ 'rev' ] = changeset_revision
    error_message = ''
    return_code = None
    try:
        return_code = commands.archive( ui, repo, archive_dir, **options_dict )
    except Exception, e:
        error_message = "Error attempting to archive revision <b>%s</b> of repository %s: %s\nReturn code: %s\n" % \
            ( str( changeset_revision ), str( repository.name ), str( e ), str( return_code ) )
        log.exception( error_message )
def get_previous_repository_reviews( trans, repository, changeset_revision ):
    """Return an ordered dictionary of repository reviews up to and including the received changeset revision."""
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
    previous_reviews_dict = odict()
    for changeset in suc.reversed_upper_bounded_changelog( repo, changeset_revision ):
        previous_changeset_revision = str( repo.changectx( changeset ) )
        if previous_changeset_revision in reviewed_revision_hashes:
            previous_rev, previous_changeset_revision_label = suc.get_rev_label_from_changeset_revision( repo, previous_changeset_revision )
            revision_reviews = get_reviews_by_repository_id_changeset_revision( trans,
                                                                                trans.security.encode_id( repository.id ),
                                                                                previous_changeset_revision )
            previous_reviews_dict[ previous_changeset_revision ] = dict( changeset_revision_label=previous_changeset_revision_label,
                                                                         reviews=revision_reviews )
    return previous_reviews_dict
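# Hypothetical shape of the ordered dictionary returned above, keyed by changeset hash
# (odict here is the same ordered-dict class this module already uses); the hash, label,
# and empty review list are made up.
example_previous_reviews_dict = odict()
example_previous_reviews_dict[ '1a070566e9c6' ] = dict( changeset_revision_label='1:1a070566e9c6',
                                                        reviews=[] )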
def select_previous_review( self, trans, **kwd ):
    # The value of the received id is the encoded repository id.
    message = kwd.get( 'message', '' )
    status = kwd.get( 'status', 'done' )
    repository = suc.get_repository_in_tool_shed( trans, kwd[ 'id' ] )
    changeset_revision = kwd.get( 'changeset_revision', None )
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    previous_reviews_dict = review_util.get_previous_repository_reviews( trans, repository, changeset_revision )
    rev, changeset_revision_label = suc.get_rev_label_from_changeset_revision( repo, changeset_revision )
    return trans.fill_template( '/webapps/tool_shed/repository_review/select_previous_review.mako',
                                repository=repository,
                                changeset_revision=changeset_revision,
                                changeset_revision_label=changeset_revision_label,
                                previous_reviews_dict=previous_reviews_dict,
                                message=message,
                                status=status )
def handle_directory_changes( trans, repository, full_path, filenames_in_archive, remove_repo_files_not_in_tar,
                              new_repo_alert, commit_message, undesirable_dirs_removed, undesirable_files_removed ):
    repo_dir = repository.repo_path( trans.app )
    repo = hg.repository( suc.get_configured_ui(), repo_dir )
    content_alert_str = ''
    files_to_remove = []
    filenames_in_archive = [ os.path.join( full_path, name ) for name in filenames_in_archive ]
    if remove_repo_files_not_in_tar and not repository.is_new( trans.app ):
        # We have a repository that is not new (it contains files), so discover those files that are in the
        # repository, but not in the uploaded archive.
        for root, dirs, files in os.walk( full_path ):
            if root.find( '.hg' ) < 0 and root.find( 'hgrc' ) < 0:
                for undesirable_dir in UNDESIRABLE_DIRS:
                    if undesirable_dir in dirs:
                        dirs.remove( undesirable_dir )
                        undesirable_dirs_removed += 1
                for undesirable_file in UNDESIRABLE_FILES:
                    if undesirable_file in files:
                        files.remove( undesirable_file )
                        undesirable_files_removed += 1
                for name in files:
                    full_name = os.path.join( root, name )
                    if full_name not in filenames_in_archive:
                        files_to_remove.append( full_name )
        for repo_file in files_to_remove:
            # Remove files in the repository (relative to the upload point) that are not in the uploaded archive.
            try:
                commands.remove( repo.ui, repo, repo_file, force=True )
            except Exception, e:
                log.debug( "Error removing files using the mercurial API, so trying a different approach, the error was: %s" % str( e ) )
                relative_selected_file = repo_file.split( 'repo_%d' % repository.id )[ 1 ].lstrip( '/' )
                repo.dirstate.remove( relative_selected_file )
                repo.dirstate.write()
                absolute_selected_file = os.path.abspath( repo_file )
                if os.path.isdir( absolute_selected_file ):
                    try:
                        os.rmdir( absolute_selected_file )
                    except OSError, e:
                        # The directory is not empty.
                        pass
                elif os.path.isfile( absolute_selected_file ):
                    os.remove( absolute_selected_file )
                    dir = os.path.split( absolute_selected_file )[ 0 ]
                    try:
                        os.rmdir( dir )
                    except OSError, e:
                        # The directory is not empty.
                        pass
def should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ):
    """
    The received testable_revision is True if the tool has defined tests and test files are in the repository.
    This method returns True if the received repository has multiple downloadable revisions and the received
    changeset_revision is not the most recent downloadable revision and the received testable_revision is False.
    In this case, the received changeset_revision will never be updated with correct data, and re-testing it
    would be redundant.
    """
    if not testable_revision:
        repo_dir = repository.repo_path( app )
        repo = hg.repository( suc.get_configured_ui(), repo_dir )
        changeset_revisions = suc.get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
        if len( changeset_revisions ) > 1:
            latest_downloadable_revision = changeset_revisions[ -1 ]
            if changeset_revision != latest_downloadable_revision:
                return True
    return False
def handle_repository_dependency_elem( trans, elem, unpopulate=False ):
    # <repository name="molecule_datatypes" owner="test" changeset_revision="1a070566e9c6" />
    error_message = ''
    name = elem.get( 'name' )
    owner = elem.get( 'owner' )
    # The name and owner attributes are always required, so if either are missing, return the error message.
    if not name or not owner:
        error_message = handle_missing_repository_attribute( elem )
        return False, elem, error_message
    revised = False
    toolshed = elem.get( 'toolshed' )
    changeset_revision = elem.get( 'changeset_revision' )
    if unpopulate:
        # We're exporting the repository, so eliminate all toolshed and changeset_revision attributes from the
        # <repository> tag.
        if toolshed or changeset_revision:
            attributes = odict()
            attributes[ 'name' ] = name
            attributes[ 'owner' ] = owner
            attributes[ 'prior_installation_required' ] = elem.get( 'prior_installation_required', 'False' )
            elem = xml_util.create_element( 'repository', attributes=attributes, sub_elements=None )
            revised = True
        return revised, elem, error_message
    # From here on we're populating the toolshed and changeset_revision attributes if necessary.
    if not toolshed:
        # Default the setting to the current tool shed.
        toolshed = str( url_for( '/', qualified=True ) ).rstrip( '/' )
        elem.attrib[ 'toolshed' ] = toolshed
        revised = True
    if not changeset_revision:
        # Populate the changeset_revision attribute with the latest installable metadata revision for the defined
        # repository.  We use the latest installable revision instead of the latest metadata revision to ensure
        # that the contents of the revision are valid.
        repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
        if repository:
            repo_dir = repository.repo_path( trans.app )
            repo = hg.repository( suc.get_configured_ui(), repo_dir )
            latest_installable_changeset_revision = suc.get_latest_downloadable_changeset_revision( trans, repository, repo )
            if latest_installable_changeset_revision != suc.INITIAL_CHANGELOG_HASH:
                elem.attrib[ 'changeset_revision' ] = latest_installable_changeset_revision
                revised = True
            else:
                error_message = 'Invalid latest installable changeset_revision %s ' % str( latest_installable_changeset_revision )
                error_message += 'retrieved for repository %s owned by %s. ' % ( str( name ), str( owner ) )
        else:
            error_message = 'Unable to locate repository with name %s and owner %s. ' % ( str( name ), str( owner ) )
    return revised, elem, error_message
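# A minimal sketch of the population performed above, using the standard-library
# ElementTree rather than this module's xml_util: a <repository> tag carrying only name
# and owner gains toolshed and changeset_revision attributes.  The toolshed URL and
# revision hash are made up.
import xml.etree.ElementTree as ET

elem = ET.fromstring( '<repository name="molecule_datatypes" owner="test" />' )
elem.attrib[ 'toolshed' ] = 'http://localhost:9009'   # would default to the current tool shed
elem.attrib[ 'changeset_revision' ] = '1a070566e9c6'  # latest installable revision
print ET.tostring( elem )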
def create_hgrc_file( trans, repository ):
    # At this point, an entry for the repository is required to be in the hgweb.config file so we can call
    # repository.repo_path( trans.app ).  Since we support both http and https, we set push_ssl to False to
    # override the default (which is True) in the mercurial api.  The hg purge extension purges all files and
    # directories not being tracked by mercurial in the current repository.  It'll remove unknown files and empty
    # directories.  This is not currently used because it is not supported in the mercurial API.
    repo = hg.repository( suc.get_configured_ui(), path=repository.repo_path( trans.app ) )
    fp = repo.opener( 'hgrc', 'wb' )
    fp.write( '[paths]\n' )
    fp.write( 'default = .\n' )
    fp.write( 'default-push = .\n' )
    fp.write( '[web]\n' )
    fp.write( 'allow_push = %s\n' % repository.user.username )
    fp.write( 'name = %s\n' % repository.name )
    fp.write( 'push_ssl = false\n' )
    fp.write( '[extensions]\n' )
    fp.write( 'hgext.purge=' )
    fp.close()
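# For a hypothetical repository named add_column owned by user test, the .hg/hgrc file
# written above would contain:
#
#   [paths]
#   default = .
#   default-push = .
#   [web]
#   allow_push = test
#   name = add_column
#   push_ssl = false
#   [extensions]
#   hgext.purge=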
def manage_repository_reviews( self, trans, mine=False, **kwd ):
    # The value of the received id is the encoded repository id.
    message = kwd.get( 'message', '' )
    status = kwd.get( 'status', 'done' )
    repository_id = kwd.get( 'id', None )
    if repository_id:
        repository = suc.get_repository_in_tool_shed( trans, repository_id )
        repo_dir = repository.repo_path( trans.app )
        repo = hg.repository( suc.get_configured_ui(), repo_dir )
        metadata_revision_hashes = [ metadata_revision.changeset_revision for metadata_revision in repository.metadata_revisions ]
        reviewed_revision_hashes = [ review.changeset_revision for review in repository.reviews ]
        reviews_dict = odict()
        for changeset in suc.get_reversed_changelog_changesets( repo ):
            ctx = repo.changectx( changeset )
            changeset_revision = str( ctx )
            if changeset_revision in metadata_revision_hashes or changeset_revision in reviewed_revision_hashes:
                rev, changeset_revision_label = suc.get_rev_label_from_changeset_revision( repo, changeset_revision )
                if changeset_revision in reviewed_revision_hashes:
                    # Find the review for this changeset_revision.
                    repository_reviews = review_util.get_reviews_by_repository_id_changeset_revision( trans,
                                                                                                      repository_id,
                                                                                                      changeset_revision )
                    # Determine if the current user can add a review to this revision.
                    can_add_review = trans.user not in [ repository_review.user for repository_review in repository_reviews ]
                    repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
                    if repository_metadata:
                        repository_metadata_reviews = util.listify( repository_metadata.reviews )
                    else:
                        repository_metadata_reviews = []
                else:
                    repository_reviews = []
                    repository_metadata_reviews = []
                    can_add_review = True
                installable = changeset_revision in metadata_revision_hashes
                revision_dict = dict( changeset_revision_label=changeset_revision_label,
                                      repository_reviews=repository_reviews,
                                      repository_metadata_reviews=repository_metadata_reviews,
                                      installable=installable,
                                      can_add_review=can_add_review )
                reviews_dict[ changeset_revision ] = revision_dict
        return trans.fill_template( '/webapps/tool_shed/repository_review/reviews_of_repository.mako',
                                    repository=repository,
                                    reviews_dict=reviews_dict,
                                    mine=mine,
                                    message=message,
                                    status=status )
def create_repository( trans, name, type, description, long_description, user_id, category_ids=[] ):
    # Add the repository record to the database.
    repository = trans.app.model.Repository( name=name,
                                             type=type,
                                             description=description,
                                             long_description=long_description,
                                             user_id=user_id )
    # Flush to get the id.
    trans.sa_session.add( repository )
    trans.sa_session.flush()
    # Create an admin role for the repository.
    repository_admin_role = create_repository_admin_role( trans, repository )
    # Determine the repository's repo_path on disk.
    dir = os.path.join( trans.app.config.file_path, *directory_hash_id( repository.id ) )
    # Create directory if it does not exist.
    if not os.path.exists( dir ):
        os.makedirs( dir )
    # Define repo name inside hashed directory.
    repository_path = os.path.join( dir, "repo_%d" % repository.id )
    # Create local repository directory.
    if not os.path.exists( repository_path ):
        os.makedirs( repository_path )
    # Create the local repository.
    repo = hg.repository( suc.get_configured_ui(), repository_path, create=True )
    # Add an entry in the hgweb.config file for the local repository.
    lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
    trans.app.hgweb_config_manager.add_entry( lhs, repository_path )
    # Create a .hg/hgrc file for the local repository.
    create_hgrc_file( trans, repository )
    flush_needed = False
    if category_ids:
        # Create category associations.
        for category_id in category_ids:
            category = trans.sa_session.query( trans.model.Category ) \
                                       .get( trans.security.decode_id( category_id ) )
            rca = trans.app.model.RepositoryCategoryAssociation( repository, category )
            trans.sa_session.add( rca )
            flush_needed = True
    if flush_needed:
        trans.sa_session.flush()
    message = "Repository <b>%s</b> has been created." % str( repository.name )
    return repository, message
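# Hypothetical illustration of the hgweb.config entry added above for a user test and a
# repository add_column whose id hashes to directory 000:
#
#   repos/test/add_column = <file_path>/000/repo_1
#
# where <file_path> is trans.app.config.file_path and 000 comes from directory_hash_id().
example_lhs = "repos/%s/%s" % ( 'test', 'add_column' )  # made-up username and repository name
print example_lhs  # repos/test/add_column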
def manage_repository_reviews_of_revision( self, trans, **kwd ):
    # The value of the received id is the encoded repository id.
    message = kwd.get( 'message', '' )
    status = kwd.get( 'status', 'done' )
    repository_id = kwd.get( 'id', None )
    changeset_revision = kwd.get( 'changeset_revision', None )
    repository = suc.get_repository_in_tool_shed( trans, repository_id )
    repo_dir = repository.repo_path( trans.app )
    repo = hg.repository( suc.get_configured_ui(), repo_dir )
    installable = changeset_revision in [ metadata_revision.changeset_revision for metadata_revision in repository.metadata_revisions ]
    rev, changeset_revision_label = suc.get_rev_label_from_changeset_revision( repo, changeset_revision )
    reviews = review_util.get_reviews_by_repository_id_changeset_revision( trans, repository_id, changeset_revision )
    return trans.fill_template( '/webapps/tool_shed/repository_review/reviews_of_changeset_revision.mako',
                                repository=repository,
                                changeset_revision=changeset_revision,
                                changeset_revision_label=changeset_revision_label,
                                reviews=reviews,
                                installable=installable,
                                message=message,
                                status=status )
def get_ordered_installable_revisions( self, trans, name, owner, **kwd ):
    """
    GET /api/repositories/get_ordered_installable_revisions

    :param name: the name of the Repository
    :param owner: the owner of the Repository

    Returns the ordered list of changeset revision hash strings that are associated with installable revisions.
    As in the changelog, the list is ordered oldest to newest.
    """
    # Example URL: http://localhost:9009/api/repositories/get_ordered_installable_revisions?name=add_column&owner=test
    try:
        # Get the repository information.
        repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
        repo_dir = repository.repo_path( trans.app )
        repo = hg.repository( suc.get_configured_ui(), repo_dir )
        ordered_installable_revisions = suc.get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
        return ordered_installable_revisions
    except Exception, e:
        message = "Error in the Tool Shed repositories API in get_ordered_installable_revisions: %s" % str( e )
        log.error( message, exc_info=True )
        trans.response.status = 500
        return message
def import_repository_archive( trans, repository, repository_archive_dict ):
    """Import a repository archive contained within a repository capsule."""
    archive_file_name = repository_archive_dict.get( 'archive_file_name', None )
    capsule_file_name = repository_archive_dict[ 'capsule_file_name' ]
    encoded_file_path = repository_archive_dict[ 'encoded_file_path' ]
    file_path = encoding_util.tool_shed_decode( encoded_file_path )
    results_dict = dict( ok=True, error_message='' )
    archive_file_path = os.path.join( file_path, archive_file_name )
    archive = tarfile.open( archive_file_path, 'r:*' )
    repo_dir = repository.repo_path( trans.app )
    repo = hg.repository( suc.get_configured_ui(), repo_dir )
    undesirable_dirs_removed = 0
    undesirable_files_removed = 0
    ok, error_message = commit_util.check_archive( repository, archive )
    if ok:
        full_path = os.path.abspath( repo_dir )
        filenames_in_archive = []
        for tarinfo_obj in archive.getmembers():
            # Check files and directories in the archive.
            ok = os.path.basename( tarinfo_obj.name ) not in commit_util.UNDESIRABLE_FILES
            if ok:
                for file_path_item in tarinfo_obj.name.split( '/' ):
                    if file_path_item in commit_util.UNDESIRABLE_DIRS:
                        undesirable_dirs_removed += 1
                        error_message = 'Import failed: invalid file path <b>%s</b> in archive <b>%s</b>' % \
                            ( str( file_path_item ), str( archive_file_name ) )
                        results_dict[ 'ok' ] = False
                        results_dict[ 'error_message' ] += error_message
                        return results_dict
                filenames_in_archive.append( tarinfo_obj.name )
            else:
                undesirable_files_removed += 1
        # Extract the uploaded archive to the repository root.
        archive.extractall( path=full_path )
        archive.close()
        for filename in filenames_in_archive:
            uploaded_file_name = os.path.join( full_path, filename )
            if os.path.split( uploaded_file_name )[ -1 ] == suc.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
                # Inspect the contents of the file to see if changeset_revision values are missing and if so, set them appropriately.
                altered, root_elem, error_message = commit_util.handle_repository_dependencies_definition( trans,
                                                                                                           uploaded_file_name,
                                                                                                           unpopulate=False )
                if error_message:
                    results_dict[ 'ok' ] = False
                    results_dict[ 'error_message' ] += error_message
                if altered:
                    tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                    shutil.move( tmp_filename, uploaded_file_name )
            elif os.path.split( uploaded_file_name )[ -1 ] == suc.TOOL_DEPENDENCY_DEFINITION_FILENAME:
                # Inspect the contents of the file to see if changeset_revision values are missing and if so, set them appropriately.
                altered, root_elem, error_message = commit_util.handle_tool_dependencies_definition( trans, uploaded_file_name )
                if error_message:
                    results_dict[ 'ok' ] = False
                    results_dict[ 'error_message' ] += error_message
                if altered:
                    tmp_filename = xml_util.create_and_write_tmp_file( root_elem )
                    shutil.move( tmp_filename, uploaded_file_name )
        commit_message = 'Imported from capsule %s' % str( capsule_file_name )
        # Send email notification to those that have registered to receive alerts for new repositories in this Tool Shed.
        new_repo_alert = True
        # Since the repository is new, the following must be False.
        remove_repo_files_not_in_tar = False
        ok, error_message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \
            commit_util.handle_directory_changes( trans,
                                                  repository,
                                                  full_path,
                                                  filenames_in_archive,
                                                  remove_repo_files_not_in_tar,
                                                  new_repo_alert,
                                                  commit_message,
                                                  undesirable_dirs_removed,
                                                  undesirable_files_removed )
        try:
            metadata_util.set_repository_metadata_due_to_new_tip( trans, repository, content_alert_str=content_alert_str )
        except Exception, e:
            log.debug( "Error setting metadata on repository %s created from imported archive %s: %s" % \
                ( str( repository.name ), str( archive_file_name ), str( e ) ) )
        results_dict[ 'ok' ] = ok
        results_dict[ 'error_message' ] += error_message
        return results_dict
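# Hypothetical shape of the repository_archive_dict consumed by import_repository_archive;
# the file names and encoded path are made up.
example_repository_archive_dict = dict( archive_file_name='add_column.tar',
                                        capsule_file_name='capsule_with_dependencies.tar.gz',
                                        encoded_file_path='<encoded temporary path>' )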
def get_repository_revision_install_info( self, trans, name, owner, changeset_revision, **kwd ):
    """
    GET /api/repositories/get_repository_revision_install_info

    :param name: the name of the Repository
    :param owner: the owner of the Repository
    :param changeset_revision: the changeset_revision of the RepositoryMetadata object associated with the Repository

    Returns a list of the following dictionaries::

    - a dictionary defining the Repository.  For example:
        {
            "deleted": false,
            "deprecated": false,
            "description": "add_column hello",
            "id": "f9cad7b01a472135",
            "long_description": "add_column hello",
            "name": "add_column",
            "owner": "test",
            "private": false,
            "times_downloaded": 6,
            "url": "/api/repositories/f9cad7b01a472135",
            "user_id": "f9cad7b01a472135"
        }
    - a dictionary defining the Repository revision (RepositoryMetadata).  For example:
        {
            "changeset_revision": "3a08cc21466f",
            "downloadable": true,
            "has_repository_dependencies": false,
            "id": "f9cad7b01a472135",
            "includes_datatypes": false,
            "includes_tool_dependencies": false,
            "includes_tools": true,
            "includes_tools_for_display_in_tool_panel": true,
            "includes_workflows": false,
            "malicious": false,
            "repository_id": "f9cad7b01a472135",
            "url": "/api/repository_revisions/f9cad7b01a472135"
        }
    - a dictionary including the additional information required to install the repository.  For example:
        {
            "add_column": [
                "add_column hello",
                "http://test@localhost:9009/repos/test/add_column",
                "3a08cc21466f",
                "1",
                "test",
                {},
                {}
            ]
        }
    """
    # Example URL: http://localhost:9009/api/repositories/get_repository_revision_install_info?name=add_column&owner=test&changeset_revision=3a08cc21466f
    try:
        # Get the repository information.
        repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
        encoded_repository_id = trans.security.encode_id( repository.id )
        repository_dict = repository.get_api_value( view='element',
                                                    value_mapper=default_repository_value_mapper( trans, repository ) )
        repository_dict[ 'url' ] = web.url_for( controller='repositories',
                                                action='show',
                                                id=encoded_repository_id )
        # Get the repository_metadata information.
        repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
                                                                                 encoded_repository_id,
                                                                                 changeset_revision )
        if not repository_metadata:
            # The changeset_revision column in the repository_metadata table has been updated with a new value,
            # so find the changeset_revision to which we need to update.
            repo_dir = repository.repo_path( trans.app )
            repo = hg.repository( suc.get_configured_ui(), repo_dir )
            new_changeset_revision = suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
            repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
                                                                                     encoded_repository_id,
                                                                                     new_changeset_revision )
            changeset_revision = new_changeset_revision
        if repository_metadata:
            encoded_repository_metadata_id = trans.security.encode_id( repository_metadata.id )
            repository_metadata_dict = repository_metadata.get_api_value( view='collection',
                                                                          value_mapper=default_repository_metadata_value_mapper( trans, repository_metadata ) )
            repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',
                                                             action='show',
                                                             id=encoded_repository_metadata_id )
            # Get the repo_info_dict for installing the repository.
            repo_info_dict, includes_tools, includes_tool_dependencies, includes_tools_for_display_in_tool_panel, has_repository_dependencies = \
                repository_util.get_repo_info_dict( trans, encoded_repository_id, changeset_revision )
            return repository_dict, repository_metadata_dict, repo_info_dict
        else:
            message = "Unable to locate repository_metadata record for repository id %d and changeset_revision %s" % \
                ( repository.id, changeset_revision )
            log.error( message, exc_info=True )
            trans.response.status = 500
            return repository_dict, {}, {}
    except Exception, e:
        message = "Error in the Tool Shed repositories API in get_repository_revision_install_info: %s" % str( e )
        log.error( message, exc_info=True )
        trans.response.status = 500
        return message
def get_value( self, trans, grid, repository_metadata ):
    repository = repository_metadata.repository
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    ctx = suc.get_changectx_for_changeset( repo, repository_metadata.changeset_revision )
    return suc.get_revision_label( trans, repository, repository_metadata.changeset_revision, include_date=True )
def get_repository_revision_install_info( self, trans, name, owner, changeset_revision, **kwd ):
    """
    GET /api/repositories/get_repository_revision_install_info

    :param name: the name of the Repository
    :param owner: the owner of the Repository
    :param changeset_revision: the changeset_revision of the RepositoryMetadata object associated with the Repository

    Returns a list of the following dictionaries::

    - a dictionary defining the Repository.  For example:
        {
            "deleted": false,
            "deprecated": false,
            "description": "add_column hello",
            "id": "f9cad7b01a472135",
            "long_description": "add_column hello",
            "name": "add_column",
            "owner": "test",
            "private": false,
            "times_downloaded": 6,
            "url": "/api/repositories/f9cad7b01a472135",
            "user_id": "f9cad7b01a472135"
        }
    - a dictionary defining the Repository revision (RepositoryMetadata).  For example:
        {
            "changeset_revision": "3a08cc21466f",
            "downloadable": true,
            "has_repository_dependencies": false,
            "has_repository_dependencies_only_if_compiling_contained_td": false,
            "id": "f9cad7b01a472135",
            "includes_datatypes": false,
            "includes_tool_dependencies": false,
            "includes_tools": true,
            "includes_tools_for_display_in_tool_panel": true,
            "includes_workflows": false,
            "malicious": false,
            "repository_id": "f9cad7b01a472135",
            "url": "/api/repository_revisions/f9cad7b01a472135"
        }
    - a dictionary including the additional information required to install the repository.  For example:
        {
            "add_column": [
                "add_column hello",
                "http://test@localhost:9009/repos/test/add_column",
                "3a08cc21466f",
                "1",
                "test",
                {},
                {}
            ]
        }
    """
    # Example URL:
    # http://<xyz>/api/repositories/get_repository_revision_install_info?name=<n>&owner=<o>&changeset_revision=<cr>
    if name and owner and changeset_revision:
        # Get the repository information.
        repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
        if repository is None:
            log.debug( 'Cannot locate repository %s owned by %s' % ( str( name ), str( owner ) ) )
            return {}, {}, {}
        encoded_repository_id = trans.security.encode_id( repository.id )
        repository_dict = repository.to_dict( view='element',
                                              value_mapper=self.__get_value_mapper( trans ) )
        repository_dict[ 'url' ] = web.url_for( controller='repositories',
                                                action='show',
                                                id=encoded_repository_id )
        # Get the repository_metadata information.
        repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
                                                                                 encoded_repository_id,
                                                                                 changeset_revision )
        if repository_metadata is None:
            # The changeset_revision column in the repository_metadata table has been updated with a new value,
            # so find the changeset_revision to which we need to update.
            repo_dir = repository.repo_path( trans.app )
            repo = hg.repository( suc.get_configured_ui(), repo_dir )
            new_changeset_revision = suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
            repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
                                                                                     encoded_repository_id,
                                                                                     new_changeset_revision )
            changeset_revision = new_changeset_revision
        if repository_metadata is not None:
            encoded_repository_metadata_id = trans.security.encode_id( repository_metadata.id )
            repository_metadata_dict = repository_metadata.to_dict( view='collection',
                                                                    value_mapper=self.__get_value_mapper( trans ) )
            repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',
                                                             action='show',
                                                             id=encoded_repository_metadata_id )
            # Get the repo_info_dict for installing the repository.
            repo_info_dict, \
            includes_tools, \
            includes_tool_dependencies, \
            includes_tools_for_display_in_tool_panel, \
            has_repository_dependencies, \
            has_repository_dependencies_only_if_compiling_contained_td = \
                repository_util.get_repo_info_dict( trans, encoded_repository_id, changeset_revision )
            return repository_dict, repository_metadata_dict, repo_info_dict
        else:
            log.debug( "Unable to locate repository_metadata record for repository id %s and changeset_revision %s" % \
                ( str( repository.id ), str( changeset_revision ) ) )
            return repository_dict, {}, {}
    else:
        debug_msg = "Error in the Tool Shed repositories API in get_repository_revision_install_info: "
        debug_msg += "Invalid name %s or owner %s or changeset_revision %s received." % \
            ( str( name ), str( owner ), str( changeset_revision ) )
        log.debug( debug_msg )
        return {}, {}, {}
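# A minimal sketch of calling this endpoint, assuming a local Tool Shed at localhost:9009
# and the hypothetical repository add_column owned by test at revision 3a08cc21466f.
import json
import urllib2

url = ( 'http://localhost:9009/api/repositories/get_repository_revision_install_info'
        '?name=add_column&owner=test&changeset_revision=3a08cc21466f' )
repository_dict, metadata_dict, repo_info_dict = json.loads( urllib2.urlopen( url ).read() )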
def get_value( self, trans, grid, repository_metadata ):
    repository = repository_metadata.repository
    repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
    ctx = suc.get_changectx_for_changeset( repo, repository_metadata.changeset_revision )
    return "%s:%s" % ( str( ctx.rev() ), repository_metadata.changeset_revision )
def check_and_flag_repositories(app, info_only=False, verbosity=1): ''' This method will iterate through all records in the repository_metadata table, checking each one for tool metadata, then checking the tool metadata for tests. Each tool's metadata should look something like: { "add_to_tool_panel": true, "description": "", "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3", "id": "tool_wrapper", "name": "Map with Tool Wrapper", "requirements": [], "tests": [ { "inputs": [ [ "parameter", "value" ], [ "other_parameter", "other_value" ], ], "name": "Test-1", "outputs": [ [ "output_field_name", "output_file_name.bed" ] ], "required_files": [ '1.bed', '2.bed', '3.bed' ] } ], "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml", "tool_type": "default", "version": "1.2.3", "version_string_cmd": null } If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository) not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision not to be tested. TODO: Update this dict structure with the recently added components. If any error is encountered, the script will update the repository_metadata.tool_test_results attribute following this structure: { "test_environment": { "galaxy_revision": "9001:abcd1234", "galaxy_database_version": "114", "tool_shed_revision": "9001:abcd1234", "tool_shed_mercurial_version": "2.3.1", "tool_shed_database_version": "17", "python_version": "2.7.2", "architecture": "x86_64", "system": "Darwin 12.2.0" }, "passed_tests": [ { "test_id": "The test ID, generated by twill", "tool_id": "The tool ID that was tested", "tool_version": "The tool version that was tested", }, ] "failed_tests": [ { "test_id": "The test ID, generated by twill", "tool_id": "The tool ID that was tested", "tool_version": "The tool version that was tested", "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was." "traceback": "The captured traceback." }, ] "installation_errors": { 'tool_dependencies': [ { 'type': 'Type of tool dependency, e.g. package, set_environment, etc.', 'name': 'Name of the tool dependency.', 'version': 'Version if this is a package, otherwise blank.', 'error_message': 'The error message returned when installation was attempted.', }, ], 'repository_dependencies': [ { 'tool_shed': 'The tool shed that this repository was installed from.', 'name': 'The name of the repository that failed to install.', 'owner': 'Owner of the failed repository.', 'changeset_revision': 'Changeset revision of the failed repository.', 'error_message': 'The error message that was returned when the repository failed to install.', }, ], 'current_repository': [ { 'tool_shed': 'The tool shed that this repository was installed from.', 'name': 'The name of the repository that failed to install.', 'owner': 'Owner of the failed repository.', 'changeset_revision': 'Changeset revision of the failed repository.', 'error_message': 'The error message that was returned when the repository failed to install.', }, ], { "name": "The name of the repository.", "owner": "The owner of the repository.", "changeset_revision": "The changeset revision of the repository.", "error_message": "The message stored in tool_dependency.error_message." 
}, } "missing_test_components": [ { "tool_id": "The tool ID that missing components.", "tool_version": "The version of the tool." "tool_guid": "The guid of the tool." "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory." }, ] } ''' start = time.time() skip_metadata_ids = [] checked_repository_ids = [] tool_count = 0 has_tests = 0 no_tests = 0 no_tools = 0 valid_revisions = 0 invalid_revisions = 0 records_checked = 0 # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway. skip_metadata_ids = select( [app.model.SkipToolTest.table.c.repository_metadata_id]) # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test, # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable, # because it's redundant to test a revision that a user can't install. for metadata_record in app.sa_session.query( app.model.RepositoryMetadata ) \ .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True, app.model.RepositoryMetadata.table.c.includes_tools == True, app.model.RepositoryMetadata.table.c.do_not_test == False, not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ): records_checked += 1 # Initialize the repository_status dict with the test environment, but leave the test_errors empty. repository_status = {} if metadata_record.tool_test_results: repository_status = metadata_record.tool_test_results # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows, # or tests incorrectly labeled as invalid. missing_test_components = [] if 'test_environment' in repository_status: repository_status['test_environment'] = get_test_environment( repository_status['test_environment']) else: repository_status['test_environment'] = get_test_environment() repository_status['test_environment'][ 'tool_shed_database_version'] = get_database_version(app) repository_status['test_environment'][ 'tool_shed_mercurial_version'] = __version__.version repository_status['test_environment'][ 'tool_shed_revision'] = get_repository_current_revision( os.getcwd()) name = metadata_record.repository.name owner = metadata_record.repository.user.username changeset_revision = str(metadata_record.changeset_revision) if metadata_record.repository.id not in checked_repository_ids: checked_repository_ids.append(metadata_record.repository.id) if verbosity >= 1: print '# -------------------------------------------------------------------------------------------' print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner) # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning # only repositories that contain tools. if 'tools' not in metadata_record.metadata: continue else: has_test_data = False testable_revision_found = False # Clone the repository up to the changeset revision we're checking. repo_dir = metadata_record.repository.repo_path(app) repo = hg.repository(get_configured_ui(), repo_dir) work_dir = tempfile.mkdtemp(prefix="tmp-toolshed-cafr") cloned_ok, error_message = clone_repository( repo_dir, work_dir, changeset_revision) if cloned_ok: # Iterate through all the directories in the cloned changeset revision and determine whether there's a # directory named test-data. 
If this directory is not present, update the metadata record for the changeset # revision we're checking. for root, dirs, files in os.walk(work_dir): if '.hg' in dirs: dirs.remove('.hg') if 'test-data' in dirs: has_test_data = True test_data_path = os.path.join( root, dirs[dirs.index('test-data')]) break if verbosity >= 1: if not has_test_data: print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner) else: print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner) print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \ ( changeset_revision, name, owner ) # Loop through all the tools in this metadata record, checking each one for defined functional tests. for tool_metadata in metadata_record.metadata['tools']: tool_count += 1 tool_id = tool_metadata['id'] tool_version = tool_metadata['version'] tool_guid = tool_metadata['guid'] if verbosity >= 2: print "# Checking tool ID '%s' in changeset revision %s of %s." % \ ( tool_id, changeset_revision, name ) # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails, # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the # automated functional test framework produces. tool_has_tests = True if 'tests' not in tool_metadata or not tool_metadata['tests']: tool_has_tests = False if verbosity >= 2: print '# No functional tests defined for %s.' % tool_id no_tests += 1 else: tool_has_tests = True if verbosity >= 2: print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \ ( tool_id, changeset_revision, name ) has_tests += 1 failure_reason = '' problem_found = False missing_test_files = [] has_test_files = False if tool_has_tests and has_test_data: missing_test_files = check_for_missing_test_files( tool_metadata['tests'], test_data_path) if missing_test_files: if verbosity >= 2: print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \ ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) ) else: has_test_files = True if not has_test_data: failure_reason += 'Repository does not have a test-data directory. ' problem_found = True if not tool_has_tests: failure_reason += 'Functional test definitions missing for %s. 
' % tool_id problem_found = True if missing_test_files: failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join(missing_test_files)) problem_found = True test_errors = dict(tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid, missing_components=failure_reason) # The repository_metadata.tool_test_results attribute should always have the following structure: # { # "test_environment": # { # "galaxy_revision": "9001:abcd1234", # "galaxy_database_version": "114", # "tool_shed_revision": "9001:abcd1234", # "tool_shed_mercurial_version": "2.3.1", # "tool_shed_database_version": "17", # "python_version": "2.7.2", # "architecture": "x86_64", # "system": "Darwin 12.2.0" # }, # "passed_tests": # [ # { # "test_id": "The test ID, generated by twill", # "tool_id": "The tool ID that was tested", # "tool_version": "The tool version that was tested", # }, # ] # "failed_tests": # [ # { # "test_id": "The test ID, generated by twill", # "tool_id": "The tool ID that was tested", # "tool_version": "The tool version that was tested", # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was." # "traceback": "The captured traceback." # }, # ] # "installation_errors": # { # 'tool_dependencies': # [ # { # 'type': 'Type of tool dependency, e.g. package, set_environment, etc.', # 'name': 'Name of the tool dependency.', # 'version': 'Version if this is a package, otherwise blank.', # 'error_message': 'The error message returned when installation was attempted.', # }, # ], # 'repository_dependencies': # [ # { # 'tool_shed': 'The tool shed that this repository was installed from.', # 'name': 'The name of the repository that failed to install.', # 'owner': 'Owner of the failed repository.', # 'changeset_revision': 'Changeset revision of the failed repository.', # 'error_message': 'The error message that was returned when the repository failed to install.', # }, # ], # 'current_repository': # [ # { # 'tool_shed': 'The tool shed that this repository was installed from.', # 'name': 'The name of the repository that failed to install.', # 'owner': 'Owner of the failed repository.', # 'changeset_revision': 'Changeset revision of the failed repository.', # 'error_message': 'The error message that was returned when the repository failed to install.', # }, # ], # { # "name": "The name of the repository.", # "owner": "The owner of the repository.", # "changeset_revision": "The changeset revision of the repository.", # "error_message": "The message stored in tool_dependency.error_message." # }, # } # "missing_test_components": # [ # { # "tool_id": "The tool ID that missing components.", # "tool_version": "The version of the tool." # "tool_guid": "The guid of the tool." # "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory." # }, # ] # } # # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other # than the list relevant to what it is testing. # Only append this error dict if it hasn't already been added. if problem_found: if test_errors not in missing_test_components: missing_test_components.append(test_errors) if tool_has_tests and has_test_files: testable_revision_found = True # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons. 
if os.path.exists(work_dir): shutil.rmtree(work_dir) if not missing_test_components: valid_revisions += 1 if verbosity >= 1: print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner) else: invalid_revisions += 1 if verbosity >= 1: print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner) if verbosity >= 2: for invalid_test in missing_test_components: if 'missing_components' in invalid_test: print '# %s' % invalid_test[ 'missing_components'] if not info_only: # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform # on which this script was run. if missing_test_components: # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been # found in this revision, and: # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. # In this case, the revision will never be updated with the missing components, and re-testing it would be redundant. # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable # revision. In this case, if the repository is updated with test data or functional tests, the downloadable # changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable # changeset revision will be created, either of which will be automatically checked and flagged as appropriate. # In the install and test script, this behavior is slightly different, since we do want to always run functional # tests on the most recent downloadable changeset revision. if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ) and not testable_revision_found: metadata_record.do_not_test = True metadata_record.tools_functionally_correct = False metadata_record.missing_test_components = True repository_status[ 'missing_test_components'] = missing_test_components metadata_record.tool_test_results = repository_status metadata_record.time_last_tested = datetime.utcnow() app.sa_session.add(metadata_record) app.sa_session.flush() stop = time.time() print '# -------------------------------------------------------------------------------------------' print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len(checked_repository_ids), tool_count, records_checked) print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions print '# Found %d tools without functional tests.' % no_tests print '# Found %d tools with functional tests.' % has_tests if info_only: print '# Database not updated, info_only set.' print "# Elapsed time: ", stop - start print "#############################################################################"
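# The check_for_missing_test_files() helper called above is defined elsewhere in this module's
# imports. The following is a minimal sketch of what such a helper plausibly does, assuming each
# test dict carries a "required_files" list as shown in the metadata example in the docstring.
# It is an illustration only, not the Tool Shed's actual implementation.
import os

def check_for_missing_test_files( test_dicts, test_data_path ):
    missing = []
    for test_dict in test_dicts:
        for required_file in test_dict.get( 'required_files', [] ):
            # Every file a test references must exist in the cloned revision's test-data directory.
            if not os.path.exists( os.path.join( test_data_path, required_file ) ) and required_file not in missing:
                missing.append( required_file )
    return missing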
def repository_dependencies( self, trans, id, **kwd ): """ GET /api/repository_revisions/{encoded repository_metadata id}/repository_dependencies Returns a list of dictionaries that each define a specific downloadable revision of a repository in the Tool Shed. This method returns dictionaries with more information in them than other methods in this controller. The information about repository_metadata is enhanced to include information about the repository (e.g., name, owner, etc) associated with the repository_metadata record. :param id: the encoded id of the `RepositoryMetadata` object """ # Example URL: http://localhost:9009/api/repository_revisions/repository_dependencies/bb125606ff9ea620 repository_dependencies_dicts = [] repository_metadata = metadata_util.get_repository_metadata_by_id( trans, id ) if repository_metadata is None: log.debug( 'Invalid repository_metadata id received: %s' % str( id ) ) return repository_dependencies_dicts metadata = repository_metadata.metadata if metadata is None: log.debug( 'The repository_metadata record with id %s has no metadata.' % str( id ) ) return repository_dependencies_dicts if 'repository_dependencies' in metadata: rd_tups = metadata[ 'repository_dependencies' ][ 'repository_dependencies' ] for rd_tup in rd_tups: tool_shed, name, owner, changeset_revision = rd_tup[ 0:4 ] repository_dependency = suc.get_repository_by_name_and_owner( trans.app, name, owner ) if repository_dependency is None: log.debug( 'Cannot locate repository dependency %s owned by %s.' % ( name, owner ) ) continue repository_dependency_id = trans.security.encode_id( repository_dependency.id ) repository_dependency_repository_metadata = \ suc.get_repository_metadata_by_changeset_revision( trans, repository_dependency_id, changeset_revision ) if repository_dependency_repository_metadata is None: # The changeset_revision column in the repository_metadata table has been updated with a new # value, so find the changeset_revision to which we need to update. repo_dir = repository_dependency.repo_path( trans.app ) repo = hg.repository( suc.get_configured_ui(), repo_dir ) new_changeset_revision = suc.get_next_downloadable_changeset_revision( repository_dependency, repo, changeset_revision ) repository_dependency_repository_metadata = \ suc.get_repository_metadata_by_changeset_revision( trans, repository_dependency_id, new_changeset_revision ) if repository_dependency_repository_metadata is None: decoded_repository_dependency_id = trans.security.decode_id( repository_dependency_id ) debug_msg = 'Cannot locate repository_metadata with id %d for repository dependency %s owned by %s ' % \ ( decoded_repository_dependency_id, str( name ), str( owner ) ) debug_msg += 'using either of these changeset_revisions: %s, %s.' % \ ( str( changeset_revision ), str( new_changeset_revision ) ) log.debug( debug_msg ) continue else: changeset_revision = new_changeset_revision repository_dependency_repository_metadata_id = trans.security.encode_id( repository_dependency_repository_metadata.id ) repository_dependency_metadata_dict = \ repository_dependency_repository_metadata.to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) ) repository_dependency_dict = repository_dependency.to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) ) # We need to be careful with the entries in our repository_dependency_dict here since this Tool Shed API # controller is working with repository_metadata records.
The above to_dict() method returns a dictionary # with an id entry for the repository record. However, all of the other methods in this controller have # the id entry associated with a repository_metadata record id. To avoid confusion, we'll update the # repository_dependency_metadata_dict with entries from the repository_dependency_dict without using the # Python dictionary update() method because we do not want to overwrite existing entries. for k, v in repository_dependency_dict.items(): if k not in repository_dependency_metadata_dict: repository_dependency_metadata_dict[ k ] = v repository_dependency_metadata_dict[ 'url' ] = web.url_for( controller='repositories', action='show', id=repository_dependency_id ) repository_dependencies_dicts.append( repository_dependency_metadata_dict ) return repository_dependencies_dicts
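# A quick client-side sketch of the endpoint above, assuming a Tool Shed running locally on
# port 9009; the encoded repository_metadata id is a placeholder, not a real identifier, and
# the printed keys follow the merged repository/repository_metadata dict described above.
import json
import urllib2

tool_shed_url = 'http://localhost:9009'
encoded_metadata_id = 'bb125606ff9ea620'  # hypothetical encoded id
url = '%s/api/repository_revisions/%s/repository_dependencies' % ( tool_shed_url, encoded_metadata_id )
for dependency_dict in json.loads( urllib2.urlopen( url ).read() ):
    # Each dict merges repository fields (name, owner, etc) into the repository_metadata dict.
    print '%s owned by %s, changeset %s' % ( dependency_dict[ 'name' ], dependency_dict[ 'owner' ], dependency_dict[ 'changeset_revision' ] )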
def get_repository_revision_install_info( self, trans, name, owner, changeset_revision, **kwd ): """ GET /api/repositories/get_repository_revision_install_info :param name: the name of the Repository :param owner: the owner of the Repository :param changeset_revision: the changeset_revision of the RepositoryMetadata object associated with the Repository Returns a list of the following dictionaries:: - a dictionary defining the Repository. For example: { "deleted": false, "deprecated": false, "description": "add_column hello", "id": "f9cad7b01a472135", "long_description": "add_column hello", "name": "add_column", "owner": "test", "private": false, "times_downloaded": 6, "url": "/api/repositories/f9cad7b01a472135", "user_id": "f9cad7b01a472135" } - a dictionary defining the Repository revision (RepositoryMetadata). For example: { "changeset_revision": "3a08cc21466f", "downloadable": true, "has_repository_dependencies": false, "id": "f9cad7b01a472135", "includes_datatypes": false, "includes_tool_dependencies": false, "includes_tools": true, "includes_tools_for_display_in_tool_panel": true, "includes_workflows": false, "malicious": false, "repository_id": "f9cad7b01a472135", "url": "/api/repository_revisions/f9cad7b01a472135" } - a dictionary including the additional information required to install the repository. For example: { "add_column": [ "add_column hello", "http://test@localhost:9009/repos/test/add_column", "3a08cc21466f", "1", "test", {}, {} ] } """ # Example URL: http://localhost:9009/api/repositories/get_repository_revision_install_info?name=add_column&owner=test&changeset_revision=3a08cc21466f try: # Get the repository information. repository = suc.get_repository_by_name_and_owner( trans.app, name, owner ) encoded_repository_id = trans.security.encode_id( repository.id ) repository_dict = repository.get_api_value( view='element', value_mapper=default_repository_value_mapper( trans, repository ) ) repository_dict[ 'url' ] = web.url_for( controller='repositories', action='show', id=encoded_repository_id ) # Get the repository_metadata information. repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, encoded_repository_id, changeset_revision ) if not repository_metadata: # The changeset_revision column in the repository_metadata table has been updated with a new value, so find the # changeset_revision to which we need to update. repo_dir = repository.repo_path( trans.app ) repo = hg.repository( suc.get_configured_ui(), repo_dir ) new_changeset_revision = suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision ) repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, encoded_repository_id, new_changeset_revision ) changeset_revision = new_changeset_revision if repository_metadata: encoded_repository_metadata_id = trans.security.encode_id( repository_metadata.id ) repository_metadata_dict = repository_metadata.get_api_value( view='collection', value_mapper=default_repository_metadata_value_mapper( trans, repository_metadata ) ) repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions', action='show', id=encoded_repository_metadata_id ) # Get the repo_info_dict for installing the repository.
repo_info_dict, includes_tools, includes_tool_dependencies, includes_tools_for_display_in_tool_panel, has_repository_dependencies = \ repository_util.get_repo_info_dict( trans, encoded_repository_id, changeset_revision ) return repository_dict, repository_metadata_dict, repo_info_dict else: message = "Unable to locate repository_metadata record for repository id %d and changeset_revision %s" % ( repository.id, changeset_revision ) log.error( message, exc_info=True ) trans.response.status = 500 return repository_dict, {}, {} except Exception, e: message = "Error in the Tool Shed repositories API in get_repository_revision_install_info: %s" % str( e ) log.error( message, exc_info=True ) trans.response.status = 500 return message
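# A sketch of consuming the three-part response documented above, using the example URL from
# the source comment; the name/owner/changeset_revision values are the sample ones shown there.
import json
import urllib2

url = 'http://localhost:9009/api/repositories/get_repository_revision_install_info?name=add_column&owner=test&changeset_revision=3a08cc21466f'
repository_dict, repository_metadata_dict, repo_info_dict = json.loads( urllib2.urlopen( url ).read() )
print 'Repository %s revision %s, downloadable: %s' % ( repository_dict[ 'name' ], repository_metadata_dict[ 'changeset_revision' ], repository_metadata_dict[ 'downloadable' ] )
# repo_info_dict maps the repository name to the tuple of values needed to install it.
print repo_info_dict.keys()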
def upload(self, trans, **kwd): message = kwd.get('message', '') status = kwd.get('status', 'done') commit_message = kwd.get('commit_message', 'Uploaded') category_ids = util.listify(kwd.get('category_id', '')) categories = suc.get_categories(trans) repository_id = kwd.get('repository_id', '') repository = suc.get_repository_in_tool_shed(trans, repository_id) repo_dir = repository.repo_path(trans.app) repo = hg.repository(suc.get_configured_ui(), repo_dir) uncompress_file = util.string_as_bool( kwd.get('uncompress_file', 'true')) remove_repo_files_not_in_tar = util.string_as_bool( kwd.get('remove_repo_files_not_in_tar', 'true')) uploaded_file = None upload_point = commit_util.get_upload_point(repository, **kwd) tip = repository.tip(trans.app) file_data = kwd.get('file_data', '') url = kwd.get('url', '') # Part of the upload process is sending email notification to those that have registered to # receive them. One scenario occurs when the first change set is produced for the repository. # See the suc.handle_email_alerts() method for the definition of the scenarios. new_repo_alert = repository.is_new(trans.app) uploaded_directory = None if kwd.get('upload_button', False): if file_data == '' and url == '': message = 'No files were entered on the upload form.' status = 'error' uploaded_file = None elif url and url.startswith('hg'): # Use mercurial clone to fetch repository, contents will then be copied over. uploaded_directory = tempfile.mkdtemp() repo_url = 'http%s' % url[len('hg'):] repo_url = repo_url.encode('ascii', 'replace') commands.clone(suc.get_configured_ui(), repo_url, uploaded_directory) elif url: valid_url = True try: stream = urllib.urlopen(url) except Exception, e: valid_url = False message = 'Error uploading file via http: %s' % str(e) status = 'error' uploaded_file = None if valid_url: fd, uploaded_file_name = tempfile.mkstemp() uploaded_file = open(uploaded_file_name, 'wb') while 1: chunk = stream.read(util.CHUNK_SIZE) if not chunk: break uploaded_file.write(chunk) uploaded_file.flush() uploaded_file_filename = url.split('/')[-1] isempty = os.path.getsize( os.path.abspath(uploaded_file_name)) == 0 elif file_data not in ('', None): uploaded_file = file_data.file uploaded_file_name = uploaded_file.name uploaded_file_filename = os.path.split(file_data.filename)[-1] isempty = os.path.getsize( os.path.abspath(uploaded_file_name)) == 0 if uploaded_file or uploaded_directory: ok = True isgzip = False isbz2 = False if uploaded_file: if uncompress_file: isgzip = checkers.is_gzip(uploaded_file_name) if not isgzip: isbz2 = checkers.is_bz2(uploaded_file_name) if isempty: tar = None istar = False else: # Determine what we have - a single file or an archive try: if (isgzip or isbz2) and uncompress_file: # Open for reading with transparent compression. 
tar = tarfile.open(uploaded_file_name, 'r:*') else: tar = tarfile.open(uploaded_file_name) istar = True except tarfile.ReadError, e: tar = None istar = False else: # Uploaded directory istar = False if istar: ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \ self.upload_tar( trans, repository, tar, uploaded_file, upload_point, remove_repo_files_not_in_tar, commit_message, new_repo_alert ) elif uploaded_directory: ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \ self.upload_directory( trans, repository, uploaded_directory, upload_point, remove_repo_files_not_in_tar, commit_message, new_repo_alert ) else: if (isgzip or isbz2) and uncompress_file: uploaded_file_filename = commit_util.uncompress( repository, uploaded_file_name, uploaded_file_filename, isgzip, isbz2) if repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION and uploaded_file_filename != suc.TOOL_DEPENDENCY_DEFINITION_FILENAME: ok = False message = 'Repositories of type <b>Tool dependency definition</b> can only contain a single file named <b>tool_dependencies.xml</b>.' if ok: if upload_point is not None: full_path = os.path.abspath( os.path.join(repo_dir, upload_point, uploaded_file_filename)) else: full_path = os.path.abspath( os.path.join(repo_dir, uploaded_file_filename)) # Move some version of the uploaded file to the load_point within the repository hierarchy. if uploaded_file_filename in [ suc.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME ]: # Inspect the contents of the file to see if changeset_revision values are missing and if so, set them appropriately. altered, root_elem = commit_util.handle_repository_dependencies_definition( trans, uploaded_file_name) if altered: tmp_filename = xml_util.create_and_write_tmp_file( root_elem) shutil.move(tmp_filename, full_path) else: shutil.move(uploaded_file_name, full_path) elif uploaded_file_filename in [ suc.TOOL_DEPENDENCY_DEFINITION_FILENAME ]: # Inspect the contents of the file to see if it defines a complex repository dependency definition whose changeset_revision values # are missing and if so, set them appropriately. altered, root_elem = commit_util.handle_tool_dependencies_definition( trans, uploaded_file_name) if altered: tmp_filename = xml_util.create_and_write_tmp_file( root_elem) shutil.move(tmp_filename, full_path) else: shutil.move(uploaded_file_name, full_path) else: shutil.move(uploaded_file_name, full_path) # See if any admin users have chosen to receive email alerts when a repository is updated. If so, check every uploaded file to ensure # content is appropriate. check_contents = commit_util.check_file_contents_for_email_alerts( trans) if check_contents and os.path.isfile(full_path): content_alert_str = commit_util.check_file_content_for_html_and_images( full_path) else: content_alert_str = '' commands.add(repo.ui, repo, full_path) # Convert from unicode to prevent "TypeError: array item must be char" full_path = full_path.encode('ascii', 'replace') commands.commit(repo.ui, repo, full_path, user=trans.user.username, message=commit_message) if full_path.endswith( 'tool_data_table_conf.xml.sample'): # Handle the special case where a tool_data_table_conf.xml.sample file is being uploaded by parsing the file and adding new entries # to the in-memory trans.app.tool_data_tables dictionary. 
error, error_message = tool_util.handle_sample_tool_data_table_conf_file( trans.app, full_path) if error: message = '%s<br/>%s' % (message, error_message) # See if the content of the change set was valid. admin_only = len( repository.downloadable_revisions) != 1 suc.handle_email_alerts( trans, repository, content_alert_str=content_alert_str, new_repo_alert=new_repo_alert, admin_only=admin_only) if ok: # Update the repository files for browsing. suc.update_repository(repo) # Get the new repository tip. if tip == repository.tip(trans.app): message = 'No changes to repository. ' status = 'warning' else: if (isgzip or isbz2) and uncompress_file: uncompress_str = ' uncompressed and ' else: uncompress_str = ' ' if uploaded_directory: source_type = "repository" source = url else: source_type = "file" source = uploaded_file_filename message = "The %s <b>%s</b> has been successfully%suploaded to the repository. " % ( source_type, source, uncompress_str) if istar and (undesirable_dirs_removed or undesirable_files_removed): items_removed = undesirable_dirs_removed + undesirable_files_removed message += " %d undesirable items (.hg .svn .git directories, .DS_Store, hgrc files, etc) were removed from the archive. " % items_removed if istar and remove_repo_files_not_in_tar and files_to_remove: if upload_point is not None: message += " %d files were removed from the repository relative to the selected upload point '%s'. " % ( len(files_to_remove), upload_point) else: message += " %d files were removed from the repository root. " % len( files_to_remove) kwd['message'] = message metadata_util.set_repository_metadata_due_to_new_tip( trans, repository, content_alert_str=content_alert_str, **kwd) if repository.metadata_revisions: # A repository's metadata revisions are ordered by descending update_time, so the zeroth revision will be the tip just after an upload. metadata_dict = repository.metadata_revisions[ 0].metadata else: metadata_dict = {} if str(repository.type) != rt_util.TOOL_DEPENDENCY_DEFINITION: # Provide a warning message if a tool_dependencies.xml file is provided, but tool dependencies weren't loaded due to a requirement tag mismatch # or some other problem. Tool dependency definitions can define orphan tool dependencies (no relationship to any tools contained in the repository), # so warning messages are important because orphans are always valid. The repository owner must be warned in case they did not intend to define an # orphan dependency, but simply provided incorrect information (tool shed, name, owner, changeset_revision) for the definition. # Handle messaging for orphan tool dependencies. orphan_message = tool_dependency_util.generate_message_for_orphan_tool_dependencies( trans, repository, metadata_dict) if orphan_message: message += orphan_message status = 'warning' # Handle messaging for invalid tool dependencies. invalid_tool_dependencies_message = tool_dependency_util.generate_message_for_invalid_tool_dependencies( metadata_dict) if invalid_tool_dependencies_message: message += invalid_tool_dependencies_message status = 'error' # Handle messaging for invalid repository dependencies. invalid_repository_dependencies_message = repository_dependency_util.generate_message_for_invalid_repository_dependencies( metadata_dict) if invalid_repository_dependencies_message: message += invalid_repository_dependencies_message status = 'error' # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
tool_util.reset_tool_data_tables(trans.app) trans.response.send_redirect( web.url_for(controller='repository', action='browse_repository', id=repository_id, commit_message='Deleted selected files', message=message, status=status)) else: status = 'error' # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file. tool_util.reset_tool_data_tables(trans.app)
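# The checkers.is_gzip() and checkers.is_bz2() helpers used in the upload path above are not
# shown in this file; a minimal magic-byte sketch of the same idea follows. This is an
# illustration, not Galaxy's actual implementation.
def is_gzip( file_path ):
    # Gzip streams begin with the two magic bytes 1f 8b.
    with open( file_path, 'rb' ) as fh:
        return fh.read( 2 ) == '\x1f\x8b'

def is_bz2( file_path ):
    # Bzip2 streams begin with the ASCII bytes 'BZh'.
    with open( file_path, 'rb' ) as fh:
        return fh.read( 3 ) == 'BZh'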
def check_and_update_repository_metadata( app, info_only=False, verbosity=1 ): """ This method will iterate through all records in the repository_metadata table, checking each one for tool metadata, then checking the tool metadata for tests. Each tool's metadata should look something like: { "add_to_tool_panel": true, "description": "", "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3", "id": "tool_wrapper", "name": "Map with Tool Wrapper", "requirements": [], "tests": [], "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml", "tool_type": "default", "version": "1.2.3", "version_string_cmd": null } If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository) not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision not to be tested. """ start = time.time() skip_metadata_ids = [] checked_repository_ids = [] tool_count = 0 has_tests = 0 no_tests = 0 no_tools = 0 valid_revisions = 0 invalid_revisions = 0 records_checked = 0 # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway. print '# -------------------------------------------------------------------------------------------' print '# The skip_tool_test setting has been set for the following repository revisions, so they will not be tested.' skip_metadata_ids = [] for skip_tool_test in app.sa_session.query( app.model.SkipToolTest ): print '# repository_metadata_id: %s, changeset_revision: %s' % \ ( str( skip_tool_test.repository_metadata_id ), str( skip_tool_test.initial_changeset_revision ) ) print 'reason: %s' % str( skip_tool_test.comment ) skip_metadata_ids.append( skip_tool_test.repository_metadata_id ) # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test, # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable, # because it's redundant to test a revision that a user can't install. for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \ .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True, app.model.RepositoryMetadata.table.c.includes_tools == True, app.model.RepositoryMetadata.table.c.do_not_test == False ) ): # Initialize some items. missing_test_components = [] revision_has_test_data = False testable_revision = False repository = repository_metadata.repository records_checked += 1 # Check the next repository revision. changeset_revision = str( repository_metadata.changeset_revision ) name = repository.name owner = repository.user.username metadata = repository_metadata.metadata repository = repository_metadata.repository if repository.id not in checked_repository_ids: checked_repository_ids.append( repository.id ) print '# -------------------------------------------------------------------------------------------' print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner ) if repository_metadata.id in skip_metadata_ids: print '# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.'
% ( changeset_revision, name, owner ) continue # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning # only repositories that contain tools. tool_dicts = metadata.get( 'tools', None ) if tool_dicts is not None: # Clone the repository up to the changeset revision we're checking. repo_dir = repository.repo_path( app ) repo = hg.repository( suc.get_configured_ui(), repo_dir ) work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-cafr" ) cloned_ok, error_message = suc.clone_repository( repo_dir, work_dir, changeset_revision ) if cloned_ok: # Iterate through all the directories in the cloned changeset revision and determine whether there's a # directory named test-data. If this directory is not present update the metadata record for the changeset # revision we're checking. for root, dirs, files in os.walk( work_dir ): if '.hg' in dirs: dirs.remove( '.hg' ) if 'test-data' in dirs: revision_has_test_data = True test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] ) break if revision_has_test_data: print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) else: print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \ ( changeset_revision, name, owner ) # Inspect each tool_dict for defined functional tests. If there are no tests, this tool should not be tested, since the # tool functional tests only report failure if the test itself fails, not if it's missing or undefined. Filtering out those # repositories at this step will reduce the number of "false negatives" the automated functional test framework produces. for tool_dict in tool_dicts: failure_reason = '' problem_found = False tool_has_defined_tests = False tool_has_test_files = False missing_test_files = [] tool_count += 1 tool_id = tool_dict[ 'id' ] tool_version = tool_dict[ 'version' ] tool_guid = tool_dict[ 'guid' ] if verbosity >= 1: print "# Checking tool ID '%s' in changeset revision %s of %s." % ( tool_id, changeset_revision, name ) defined_test_dicts = tool_dict.get( 'tests', None ) if defined_test_dicts is not None: # We need to inspect the <test> tags because the following tags... # <tests> # </tests> # ...will produce the following metadata: # "tests": [] # And the following tags... # <tests> # <test> # </test> # </tests> # ...will produce the following metadata: # "tests": # [{"inputs": [], "name": "Test-1", "outputs": [], "required_files": []}] for defined_test_dict in defined_test_dicts: inputs = defined_test_dict.get( 'inputs', [] ) outputs = defined_test_dict.get( 'outputs', [] ) if inputs and outputs: # At least one tool within the repository has a valid <test> tag. tool_has_defined_tests = True break if tool_has_defined_tests: print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \ ( tool_id, changeset_revision, name ) has_tests += 1 else: print '# No functional tests defined for %s.' 
% tool_id no_tests += 1 if tool_has_defined_tests and revision_has_test_data: missing_test_files = check_for_missing_test_files( defined_test_dicts, test_data_path ) if missing_test_files: print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \ ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) ) else: tool_has_test_files = True if not revision_has_test_data: failure_reason += 'Repository does not have a test-data directory. ' problem_found = True if not tool_has_defined_tests: failure_reason += 'Functional test definitions missing for %s. ' % tool_id problem_found = True if missing_test_files: failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) ) problem_found = True test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid, missing_components=failure_reason ) # Only append this error dict if it hasn't already been added. if problem_found: if test_errors not in missing_test_components: missing_test_components.append( test_errors ) if tool_has_defined_tests and tool_has_test_files: print '# Revision %s of %s owned by %s is a testable revision.' % ( changeset_revision, name, owner ) testable_revision = True # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons. if os.path.exists( work_dir ): shutil.rmtree( work_dir ) if not missing_test_components: valid_revisions += 1 print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) else: invalid_revisions += 1 print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) if verbosity >= 1: for missing_test_component in missing_test_components: if 'missing_components' in missing_test_component: print '# %s' % missing_test_component[ 'missing_components' ] if not info_only: # Get or create the list of tool_test_results dictionaries. if repository_metadata.tool_test_results is not None: # We'll listify the column value in case it uses the old approach of storing the results of only a single test run. tool_test_results_dicts = listify( repository_metadata.tool_test_results ) else: tool_test_results_dicts = [] if tool_test_results_dicts: # Inspect the tool_test_results_dict for the last test run in case it contains only a test_environment # entry. This will occur with multiple runs of this script without running the associated # install_and_test_tool_shed_repositories.sh script, which will further populate the tool_test_results_dict. tool_test_results_dict = tool_test_results_dicts[ 0 ] if len( tool_test_results_dict ) <= 1: # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only # a test_environment entry. If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts # since it will be re-inserted later. tool_test_results_dict = tool_test_results_dicts.pop( 0 ) elif len( tool_test_results_dict ) == 2 and \ 'test_environment' in tool_test_results_dict and 'missing_test_components' in tool_test_results_dict: # We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components". # In this case, some tools are missing test components while others are not.
tool_test_results_dict = tool_test_results_dicts.pop( 0 ) else: # The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used. tool_test_results_dict = {} else: # Create a new dictionary for the most recent test run. tool_test_results_dict = {} test_environment_dict = tool_test_results_dict.get( 'test_environment', {} ) # Add the current time as the approximate time that this test run occurs. A similar value will also be # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed # may be configured to store multiple test run results, so each must be associated with a time stamp. now = time.strftime( "%Y-%m-%d %H:%M:%S" ) test_environment_dict[ 'time_tested' ] = now test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app ) test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() ) tool_test_results_dict[ 'test_environment' ] = test_environment_dict # The repository_metadata.time_last_tested column is not changed by this script since no testing is performed here. if missing_test_components: # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been # found in this revision, and: # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. # In this case, the revision will never be updated with the missing components, and re-testing it would be redundant. # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable # revision. In this case, if the repository is updated with test data or functional tests, the downloadable # changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable # changeset revision will be created, either of which will be automatically checked and flagged as appropriate. # In the install and test script, this behavior is slightly different, since we do want to always run functional # tests on the most recent downloadable changeset revision. if should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ): print "# Setting do_not_test to True on revision %s of %s owned by %s because it is missing test components" % \ ( changeset_revision, name, owner ) print "# and it is not the latest downloadable revision." repository_metadata.do_not_test = True if not testable_revision: # Even though some tools may be missing test components, it may be possible to test other tools. Since the # install and test framework filters out repositories marked as missing test components, we'll set it only if # no tools can be tested. print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % \ ( changeset_revision, name, owner ) repository_metadata.missing_test_components = True print "# Setting tools_functionally_correct to False on revision %s of %s owned by %s because it is missing test components" % \ ( changeset_revision, name, owner ) repository_metadata.tools_functionally_correct = False tool_test_results_dict[ 'missing_test_components' ] = missing_test_components # Store only the configured number of test runs. 
num_tool_test_results_saved = int( app.config.num_tool_test_results_saved ) if len( tool_test_results_dicts ) >= num_tool_test_results_saved: test_results_index = num_tool_test_results_saved - 1 new_tool_test_results_dicts = tool_test_results_dicts[ :test_results_index ] else: new_tool_test_results_dicts = [ d for d in tool_test_results_dicts ] # Insert the new element into the first position in the list. new_tool_test_results_dicts.insert( 0, tool_test_results_dict ) repository_metadata.tool_test_results = new_tool_test_results_dicts app.sa_session.add( repository_metadata ) app.sa_session.flush() stop = time.time() print '# -------------------------------------------------------------------------------------------' print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, records_checked ) print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions print '# Found %d tools without functional tests.' % no_tests print '# Found %d tools with functional tests.' % has_tests if info_only: print '# Database not updated, info_only set.' print "# Elapsed time: ", stop - start print "#############################################################################"
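# The capped-history bookkeeping at the end of the loop above, distilled into a small
# standalone function for clarity; num_saved stands in for app.config.num_tool_test_results_saved.
def insert_test_run( tool_test_results_dicts, tool_test_results_dict, num_saved ):
    # Keep at most num_saved runs, with the newest run in position 0.
    if len( tool_test_results_dicts ) >= num_saved:
        new_dicts = tool_test_results_dicts[ :num_saved - 1 ]
    else:
        new_dicts = list( tool_test_results_dicts )
    new_dicts.insert( 0, tool_test_results_dict )
    return new_dicts

# With num_saved=2, only the two most recent runs survive:
runs = insert_test_run( [ { 'run': 1 } ], { 'run': 2 }, 2 )
runs = insert_test_run( runs, { 'run': 3 }, 2 )
print runs  # [{'run': 3}, {'run': 2}]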
def edit_review( self, trans, **kwd ): # The value of the received id is the encoded review id. message = kwd.get( 'message', '' ) status = kwd.get( 'status', 'done' ) review_id = kwd.get( 'id', None ) review = review_util.get_review( trans, review_id ) components_dict = odict() for component in review_util.get_components( trans ): components_dict[ component.name ] = dict( component=component, component_review=None ) repository = review.repository repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) ) for component_review in review.component_reviews: if component_review and component_review.component: component_name = component_review.component.name if component_name in components_dict: component_review_dict = components_dict[ component_name ] component_review_dict[ 'component_review' ] = component_review components_dict[ component_name ] = component_review_dict # Handle a Save button click. save_button_clicked = False save_buttons = [ '%s%sreview_button' % ( component_name, STRSEP ) for component_name in components_dict.keys() ] save_buttons.append( 'revision_approved_button' ) for save_button in save_buttons: if save_button in kwd: save_button_clicked = True break if save_button_clicked: # Handle the revision_approved_select_field value. revision_approved = kwd.get( 'revision_approved', None ) revision_approved_setting_changed = False if revision_approved: revision_approved = str( revision_approved ) if review.approved != revision_approved: revision_approved_setting_changed = True review.approved = revision_approved trans.sa_session.add( review ) trans.sa_session.flush() saved_component_names = [] for component_name in components_dict.keys(): flushed = False # Retrieve the review information from the form. # The star rating form field is a radio button list, so it will not be received if it was not clicked in the form. # Due to this behavior, default the value to 0. rating = 0 for k, v in kwd.items(): if k.startswith( '%s%s' % ( component_name, STRSEP ) ): component_review_attr = k.replace( '%s%s' % ( component_name, STRSEP ), '' ) if component_review_attr == 'component_id': component_id = str( v ) elif component_review_attr == 'comment': comment = str( v ) elif component_review_attr == 'private': private = CheckboxField.is_checked( v ) elif component_review_attr == 'approved': approved = str( v ) elif component_review_attr == 'rating': rating = int( str( v ) ) component = review_util.get_component( trans, component_id ) component_review = review_util.get_component_review_by_repository_review_id_component_id( trans, review_id, component_id ) if component_review: # See if the existing component review should be updated. if component_review.comment != comment or \ component_review.private != private or \ component_review.approved != approved or \ component_review.rating != rating: component_review.comment = comment component_review.private = private component_review.approved = approved component_review.rating = rating trans.sa_session.add( component_review ) trans.sa_session.flush() flushed = True saved_component_names.append( component_name ) else: # See if a new component_review should be created. 
if comment or private or approved != trans.model.ComponentReview.approved_states.NO or rating: component_review = trans.model.ComponentReview( repository_review_id=review.id, component_id=component.id, comment=comment, private=private, approved=approved, rating=rating ) trans.sa_session.add( component_review ) trans.sa_session.flush() flushed = True saved_component_names.append( component_name ) if flushed: # Update the repository rating value to be the average of all component review ratings. average_rating = trans.sa_session.query( func.avg( trans.model.ComponentReview.table.c.rating ) ) \ .filter( and_( trans.model.ComponentReview.table.c.repository_review_id == review.id, trans.model.ComponentReview.table.c.deleted == False, trans.model.ComponentReview.table.c.approved != trans.model.ComponentReview.approved_states.NA ) ) \ .scalar() if average_rating is not None: review.rating = int( average_rating ) trans.sa_session.add( review ) trans.sa_session.flush() # Update the information in components_dict. if component_name in components_dict: component_review_dict = components_dict[ component_name ] component_review_dict[ 'component_review' ] = component_review components_dict[ component_name ] = component_review_dict if revision_approved_setting_changed: message += 'Approved value <b>%s</b> saved for this revision.<br/>' % review.approved if saved_component_names: message += 'Reviews were saved for components: %s' % ', '.join( saved_component_names ) if not revision_approved_setting_changed and not saved_component_names: message += 'No changes were made to this review, so nothing was saved.' if review and review.approved: selected_value = review.approved else: selected_value = trans.model.ComponentReview.approved_states.NO revision_approved_select_field = grids_util.build_approved_select_field( trans, name='revision_approved', selected_value=selected_value, for_component=False ) rev, changeset_revision_label = suc.get_rev_label_from_changeset_revision( repo, review.changeset_revision ) return trans.fill_template( '/webapps/tool_shed/repository_review/edit_review.mako', repository=repository, review=review, changeset_revision_label=changeset_revision_label, revision_approved_select_field=revision_approved_select_field, components_dict=components_dict, message=message, status=status )
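# The per-component form fields handled above follow a '<component name><STRSEP><attribute>'
# naming convention. A small sketch of decoding that convention; the STRSEP value used here is
# assumed for illustration and may differ from the constant imported by this controller.
STRSEP = '__ESEP__'  # assumed separator value

def parse_component_fields( kwd, component_name ):
    prefix = '%s%s' % ( component_name, STRSEP )
    fields = {}
    for k, v in kwd.items():
        if k.startswith( prefix ):
            fields[ k[ len( prefix ): ] ] = v
    return fields

kwd = { 'Documentation__ESEP__comment': 'Clear README.', 'Documentation__ESEP__rating': '4' }
print parse_component_fields( kwd, 'Documentation' )  # {'comment': 'Clear README.', 'rating': '4'}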
def check_and_flag_repositories( app, info_only=False, verbosity=1 ): ''' This method will iterate through all records in the repository_metadata table, checking each one for tool metadata, then checking the tool metadata for tests. Each tool's metadata should look something like: { "add_to_tool_panel": true, "description": "", "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3", "id": "tool_wrapper", "name": "Map with Tool Wrapper", "requirements": [], "tests": [ { "inputs": [ [ "parameter", "value" ], [ "other_parameter", "other_value" ], ], "name": "Test-1", "outputs": [ [ "output_field_name", "output_file_name.bed" ] ], "required_files": [ '1.bed', '2.bed', '3.bed' ] } ], "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml", "tool_type": "default", "version": "1.2.3", "version_string_cmd": null } If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository) not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision not to be tested. TODO: Update this dict structure with the recently added components. If any error is encountered, the script will update the repository_metadata.tool_test_results attribute following this structure: { "test_environment": { "galaxy_revision": "9001:abcd1234", "galaxy_database_version": "114", "tool_shed_revision": "9001:abcd1234", "tool_shed_mercurial_version": "2.3.1", "tool_shed_database_version": "17", "python_version": "2.7.2", "architecture": "x86_64", "system": "Darwin 12.2.0" }, "passed_tests": [ { "test_id": "The test ID, generated by twill", "tool_id": "The tool ID that was tested", "tool_version": "The tool version that was tested", }, ] "failed_tests": [ { "test_id": "The test ID, generated by twill", "tool_id": "The tool ID that was tested", "tool_version": "The tool version that was tested", "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was." "traceback": "The captured traceback." }, ] "installation_errors": { 'tool_dependencies': [ { 'type': 'Type of tool dependency, e.g. 
package, set_environment, etc.', 'name': 'Name of the tool dependency.', 'version': 'Version if this is a package, otherwise blank.', 'error_message': 'The error message returned when installation was attempted.', }, ], 'repository_dependencies': [ { 'tool_shed': 'The tool shed that this repository was installed from.', 'name': 'The name of the repository that failed to install.', 'owner': 'Owner of the failed repository.', 'changeset_revision': 'Changeset revision of the failed repository.', 'error_message': 'The error message that was returned when the repository failed to install.', }, ], 'current_repository': [ { 'tool_shed': 'The tool shed that this repository was installed from.', 'name': 'The name of the repository that failed to install.', 'owner': 'Owner of the failed repository.', 'changeset_revision': 'Changeset revision of the failed repository.', 'error_message': 'The error message that was returned when the repository failed to install.', }, ], { "name": "The name of the repository.", "owner": "The owner of the repository.", "changeset_revision": "The changeset revision of the repository.", "error_message": "The message stored in tool_dependency.error_message." }, } "missing_test_components": [ { "tool_id": "The tool ID that missing components.", "tool_version": "The version of the tool." "tool_guid": "The guid of the tool." "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory." }, ] } ''' start = time.time() skip_metadata_ids = [] checked_repository_ids = [] tool_count = 0 has_tests = 0 no_tests = 0 no_tools = 0 valid_revisions = 0 invalid_revisions = 0 records_checked = 0 # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway. skip_metadata_ids = select( [ app.model.SkipToolTest.table.c.repository_metadata_id ] ) # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test, # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable, # because it's redundant to test a revision that a user can't install. for metadata_record in app.sa_session.query( app.model.RepositoryMetadata ) \ .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True, app.model.RepositoryMetadata.table.c.includes_tools == True, app.model.RepositoryMetadata.table.c.do_not_test == False, not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ): records_checked += 1 # Initialize the repository_status dict with the test environment, but leave the test_errors empty. repository_status = {} if metadata_record.tool_test_results: repository_status = metadata_record.tool_test_results # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows, # or tests incorrectly labeled as invalid. 
missing_test_components = [] if 'test_environment' in repository_status: repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] ) else: repository_status[ 'test_environment' ] = get_test_environment() repository_status[ 'test_environment' ][ 'tool_shed_database_version' ] = get_database_version( app ) repository_status[ 'test_environment' ][ 'tool_shed_mercurial_version' ] = __version__.version repository_status[ 'test_environment' ][ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() ) name = metadata_record.repository.name owner = metadata_record.repository.user.username changeset_revision = str( metadata_record.changeset_revision ) if metadata_record.repository.id not in checked_repository_ids: checked_repository_ids.append( metadata_record.repository.id ) if verbosity >= 1: print '# -------------------------------------------------------------------------------------------' print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner ) # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning # only repositories that contain tools. if 'tools' not in metadata_record.metadata: continue else: has_test_data = False testable_revision_found = False # Clone the repository up to the changeset revision we're checking. repo_dir = metadata_record.repository.repo_path( app ) repo = hg.repository( get_configured_ui(), repo_dir ) work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-cafr" ) cloned_ok, error_message = clone_repository( repo_dir, work_dir, changeset_revision ) if cloned_ok: # Iterate through all the directories in the cloned changeset revision and determine whether there's a # directory named test-data. If this directory is not present, update the metadata record for the changeset # revision we're checking. for root, dirs, files in os.walk( work_dir ): if '.hg' in dirs: dirs.remove( '.hg' ) if 'test-data' in dirs: has_test_data = True test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] ) break if verbosity >= 1: if not has_test_data: print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) else: print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \ ( changeset_revision, name, owner ) # Loop through all the tools in this metadata record, checking each one for defined functional tests. for tool_metadata in metadata_record.metadata[ 'tools' ]: tool_count += 1 tool_id = tool_metadata[ 'id' ] tool_version = tool_metadata[ 'version' ] tool_guid = tool_metadata[ 'guid' ] if verbosity >= 2: print "# Checking tool ID '%s' in changeset revision %s of %s." % \ ( tool_id, changeset_revision, name ) # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails, # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the # automated functional test framework produces. tool_has_tests = True if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]: tool_has_tests = False if verbosity >= 2: print '# No functional tests defined for %s.' 
% tool_id no_tests += 1 else: if verbosity >= 2: print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \ ( tool_id, changeset_revision, name ) has_tests += 1 failure_reason = '' problem_found = False missing_test_files = [] has_test_files = False if tool_has_tests and has_test_data: missing_test_files = check_for_missing_test_files( tool_metadata[ 'tests' ], test_data_path ) if missing_test_files: if verbosity >= 2: print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \ ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) ) else: has_test_files = True if not has_test_data: failure_reason += 'Repository does not have a test-data directory. ' problem_found = True if not tool_has_tests: failure_reason += 'Functional test definitions missing for %s. ' % tool_id problem_found = True if missing_test_files: failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) ) problem_found = True test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid, missing_components=failure_reason ) # The repository_metadata.tool_test_results attribute should always have the following structure: # { # "test_environment": # { # "galaxy_revision": "9001:abcd1234", # "galaxy_database_version": "114", # "tool_shed_revision": "9001:abcd1234", # "tool_shed_mercurial_version": "2.3.1", # "tool_shed_database_version": "17", # "python_version": "2.7.2", # "architecture": "x86_64", # "system": "Darwin 12.2.0" # }, # "passed_tests": # [ # { # "test_id": "The test ID, generated by twill", # "tool_id": "The tool ID that was tested", # "tool_version": "The tool version that was tested", # }, # ], # "failed_tests": # [ # { # "test_id": "The test ID, generated by twill", # "tool_id": "The tool ID that was tested", # "tool_version": "The tool version that was tested", # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was.", # "traceback": "The captured traceback." # }, # ], # "installation_errors": # { # 'tool_dependencies': # [ # { # 'type': 'Type of tool dependency, e.g. package, set_environment, etc.', # 'name': 'Name of the tool dependency.', # 'version': 'Version if this is a package, otherwise blank.', # 'error_message': 'The error message returned when installation was attempted.', # }, # ], # 'repository_dependencies': # [ # { # 'tool_shed': 'The tool shed that this repository was installed from.', # 'name': 'The name of the repository that failed to install.', # 'owner': 'Owner of the failed repository.', # 'changeset_revision': 'Changeset revision of the failed repository.', # 'error_message': 'The error message that was returned when the repository failed to install.', # }, # ], # 'current_repository': # [ # { # 'tool_shed': 'The tool shed that this repository was installed from.', # 'name': 'The name of the repository that failed to install.', # 'owner': 'Owner of the failed repository.', # 'changeset_revision': 'Changeset revision of the failed repository.', # 'error_message': 'The error message that was returned when the repository failed to install.', # }, # ],
# }, # "missing_test_components": # [ # { # "tool_id": "The ID of the tool that is missing components.", # "tool_version": "The version of the tool.", # "tool_guid": "The guid of the tool.", # "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory." # }, # ] # } # # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other # than the list relevant to what it is testing. # Only append this error dict if it hasn't already been added. if problem_found: if test_errors not in missing_test_components: missing_test_components.append( test_errors ) if tool_has_tests and has_test_files: testable_revision_found = True # Remove the cloned repository path. This has to be done after the check for required test files, since that check reads files within the clone. if os.path.exists( work_dir ): shutil.rmtree( work_dir ) if not missing_test_components: valid_revisions += 1 if verbosity >= 1: print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) else: invalid_revisions += 1 if verbosity >= 1: print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) if verbosity >= 2: for invalid_test in missing_test_components: if 'missing_components' in invalid_test: print '# %s' % invalid_test[ 'missing_components' ] if not info_only: # If missing_test_components is empty, no issues were found, and we can just update time_last_tested with the platform # on which this script was run. if missing_test_components: # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been # found in this revision, and: # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. # In this case, the revision will never be updated with the missing components, and re-testing it would be redundant. # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable # revision. In this case, if the repository is updated with test data or functional tests, the downloadable # changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable # changeset revision will be created, either of which will be automatically checked and flagged as appropriate. # In the install and test script, this behavior is slightly different, since we do want to always run functional # tests on the most recent downloadable changeset revision. if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ) and not testable_revision_found: metadata_record.do_not_test = True metadata_record.tools_functionally_correct = False metadata_record.missing_test_components = True repository_status[ 'missing_test_components' ] = missing_test_components metadata_record.tool_test_results = repository_status metadata_record.time_last_tested = datetime.utcnow() app.sa_session.add( metadata_record ) app.sa_session.flush() stop = time.time() print '# -------------------------------------------------------------------------------------------' print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, records_checked ) print '# %d revisions found with functional tests and test data for all tools.'
% valid_revisions print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions print '# Found %d tools without functional tests.' % no_tests print '# Found %d tools with functional tests.' % has_tests if info_only: print '# Database not updated, info_only set.' print "# Elapsed time: ", stop - start print "#############################################################################"
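# ---------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a minimal example of the tool_test_results
# structure documented above. Only the key names come from the documented structure; every value
# below is a hypothetical placeholder invented for this example.
EXAMPLE_TOOL_TEST_RESULTS = {
    'test_environment': {
        'tool_shed_revision': '9001:abcd1234',
        'tool_shed_mercurial_version': '2.3.1',
        'tool_shed_database_version': '17',
        'python_version': '2.7.2',
        'architecture': 'x86_64',
        'system': 'Darwin 12.2.0',
    },
    'passed_tests': [],
    'failed_tests': [],
    'installation_errors': {
        'tool_dependencies': [],
        'repository_dependencies': [],
        'current_repository': [],
    },
    'missing_test_components': [
        {
            'tool_id': 'example_tool',
            'tool_version': '1.0.0',
            'tool_guid': 'toolshed.example.org/repos/some_owner/some_repo/example_tool/1.0.0',
            'missing_components': 'Repository does not have a test-data directory. ',
        },
    ],
}
# Per the note above, a script should overwrite only the list relevant to what it is testing, e.g.
# repository_status[ 'missing_test_components' ] = missing_test_components, leaving the other keys intact.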
def upload( self, trans, **kwd ): message = kwd.get( 'message', '' ) status = kwd.get( 'status', 'done' ) commit_message = kwd.get( 'commit_message', 'Uploaded' ) category_ids = util.listify( kwd.get( 'category_id', '' ) ) categories = suc.get_categories( trans ) repository_id = kwd.get( 'repository_id', '' ) repository = suc.get_repository_in_tool_shed( trans, repository_id ) repo_dir = repository.repo_path( trans.app ) repo = hg.repository( suc.get_configured_ui(), repo_dir ) uncompress_file = util.string_as_bool( kwd.get( 'uncompress_file', 'true' ) ) remove_repo_files_not_in_tar = util.string_as_bool( kwd.get( 'remove_repo_files_not_in_tar', 'true' ) ) uploaded_file = None upload_point = commit_util.get_upload_point( repository, **kwd ) tip = repository.tip( trans.app ) file_data = kwd.get( 'file_data', '' ) url = kwd.get( 'url', '' ) # Part of the upload process is sending email notification to those that have registered to # receive them. One scenario occurs when the first change set is produced for the repository. # See the suc.handle_email_alerts() method for the definition of the scenarios. new_repo_alert = repository.is_new( trans.app ) uploaded_directory = None if kwd.get( 'upload_button', False ): if file_data == '' and url == '': message = 'No files were entered on the upload form.' status = 'error' uploaded_file = None elif url and url.startswith( 'hg' ): # Use mercurial clone to fetch the repository; its contents will then be copied over. uploaded_directory = tempfile.mkdtemp() repo_url = 'http%s' % url[ len( 'hg' ): ] repo_url = repo_url.encode( 'ascii', 'replace' ) try: commands.clone( suc.get_configured_ui(), repo_url, uploaded_directory ) except Exception, e: message = 'Error uploading via mercurial clone: %s' % suc.to_html_string( str( e ) ) status = 'error' suc.remove_dir( uploaded_directory ) uploaded_directory = None elif url: valid_url = True try: stream = urllib.urlopen( url ) except Exception, e: valid_url = False message = 'Error uploading file via http: %s' % str( e ) status = 'error' uploaded_file = None if valid_url: fd, uploaded_file_name = tempfile.mkstemp() uploaded_file = open( uploaded_file_name, 'wb' ) while 1: chunk = stream.read( util.CHUNK_SIZE ) if not chunk: break uploaded_file.write( chunk ) uploaded_file.flush() uploaded_file_filename = url.split( '/' )[ -1 ] isempty = os.path.getsize( os.path.abspath( uploaded_file_name ) ) == 0 elif file_data not in ( '', None ): uploaded_file = file_data.file uploaded_file_name = uploaded_file.name uploaded_file_filename = os.path.split( file_data.filename )[ -1 ] isempty = os.path.getsize( os.path.abspath( uploaded_file_name ) ) == 0 if uploaded_file or uploaded_directory: ok = True isgzip = False isbz2 = False if uploaded_file: if uncompress_file: isgzip = checkers.is_gzip( uploaded_file_name ) if not isgzip: isbz2 = checkers.is_bz2( uploaded_file_name ) if isempty: tar = None istar = False else: # Determine what we have - a single file or an archive. try: if ( isgzip or isbz2 ) and uncompress_file: # Open for reading with transparent compression.
tar = tarfile.open( uploaded_file_name, 'r:*' ) else: tar = tarfile.open( uploaded_file_name ) istar = True except tarfile.ReadError, e: tar = None istar = False else: # Uploaded directory istar = False if istar: ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \ self.upload_tar( trans, repository, tar, uploaded_file, upload_point, remove_repo_files_not_in_tar, commit_message, new_repo_alert ) elif uploaded_directory: ok, message, files_to_remove, content_alert_str, undesirable_dirs_removed, undesirable_files_removed = \ self.upload_directory( trans, repository, uploaded_directory, upload_point, remove_repo_files_not_in_tar, commit_message, new_repo_alert ) else: if ( isgzip or isbz2 ) and uncompress_file: uploaded_file_filename = commit_util.uncompress( repository, uploaded_file_name, uploaded_file_filename, isgzip=isgzip, isbz2=isbz2 ) if repository.type == rt_util.TOOL_DEPENDENCY_DEFINITION and uploaded_file_filename != suc.TOOL_DEPENDENCY_DEFINITION_FILENAME: ok = False message = 'Repositories of type <b>Tool dependency definition</b> can only contain a single file named <b>tool_dependencies.xml</b>.' if ok: if upload_point is not None: full_path = os.path.abspath( os.path.join( repo_dir, upload_point, uploaded_file_filename ) ) else: full_path = os.path.abspath( os.path.join( repo_dir, uploaded_file_filename ) ) # Move some version of the uploaded file to the upload_point within the repository hierarchy. if uploaded_file_filename in [ suc.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME ]: # Inspect the contents of the file to see if changeset_revision values are missing and, if so, set them appropriately. altered, root_elem, error_message = commit_util.handle_repository_dependencies_definition( trans, uploaded_file_name, unpopulate=False ) if error_message: ok = False message = error_message status = 'error' elif altered: tmp_filename = xml_util.create_and_write_tmp_file( root_elem ) shutil.move( tmp_filename, full_path ) else: shutil.move( uploaded_file_name, full_path ) elif uploaded_file_filename in [ suc.TOOL_DEPENDENCY_DEFINITION_FILENAME ]: # Inspect the contents of the file to see if it defines a complex repository dependency definition whose changeset_revision values # are missing and, if so, set them appropriately. altered, root_elem, error_message = commit_util.handle_tool_dependencies_definition( trans, uploaded_file_name ) if error_message: ok = False message = error_message status = 'error' if ok: if altered: tmp_filename = xml_util.create_and_write_tmp_file( root_elem ) shutil.move( tmp_filename, full_path ) else: shutil.move( uploaded_file_name, full_path ) else: shutil.move( uploaded_file_name, full_path ) if ok: # See if any admin users have chosen to receive email alerts when a repository is updated. If so, check every uploaded file to ensure # content is appropriate.
check_contents = commit_util.check_file_contents_for_email_alerts( trans ) if check_contents and os.path.isfile( full_path ): content_alert_str = commit_util.check_file_content_for_html_and_images( full_path ) else: content_alert_str = '' commands.add( repo.ui, repo, full_path ) # Convert from unicode to prevent "TypeError: array item must be char". full_path = full_path.encode( 'ascii', 'replace' ) commands.commit( repo.ui, repo, full_path, user=trans.user.username, message=commit_message ) if full_path.endswith( 'tool_data_table_conf.xml.sample' ): # Handle the special case where a tool_data_table_conf.xml.sample file is being uploaded by parsing the file and adding new entries # to the in-memory trans.app.tool_data_tables dictionary. error, error_message = tool_util.handle_sample_tool_data_table_conf_file( trans.app, full_path ) if error: message = '%s<br/>%s' % ( message, error_message ) # See if the content of the change set was valid. admin_only = len( repository.downloadable_revisions ) != 1 suc.handle_email_alerts( trans, repository, content_alert_str=content_alert_str, new_repo_alert=new_repo_alert, admin_only=admin_only ) if ok: # Update the repository files for browsing. suc.update_repository( repo ) # Get the new repository tip. if tip == repository.tip( trans.app ): message = 'No changes to repository. ' status = 'warning' else: if ( isgzip or isbz2 ) and uncompress_file: uncompress_str = ' uncompressed and ' else: uncompress_str = ' ' if uploaded_directory: source_type = "repository" source = url else: source_type = "file" source = uploaded_file_filename message = "The %s <b>%s</b> has been successfully%suploaded to the repository. " % ( source_type, source, uncompress_str ) if istar and ( undesirable_dirs_removed or undesirable_files_removed ): items_removed = undesirable_dirs_removed + undesirable_files_removed message += " %d undesirable items (.hg .svn .git directories, .DS_Store, hgrc files, etc.) were removed from the archive. " % items_removed if istar and remove_repo_files_not_in_tar and files_to_remove: if upload_point is not None: message += " %d files were removed from the repository relative to the selected upload point '%s'. " % ( len( files_to_remove ), upload_point ) else: message += " %d files were removed from the repository root. " % len( files_to_remove ) kwd[ 'message' ] = message metadata_util.set_repository_metadata_due_to_new_tip( trans, repository, content_alert_str=content_alert_str, **kwd ) if repository.metadata_revisions: # A repository's metadata revisions are ordered descending by update_time, so the zeroth revision will be the tip just after an upload. metadata_dict = repository.metadata_revisions[ 0 ].metadata else: metadata_dict = {} if str( repository.type ) != rt_util.TOOL_DEPENDENCY_DEFINITION: change_repository_type_message = tool_dependency_util.generate_message_for_repository_type_change( trans, repository ) if change_repository_type_message: message += change_repository_type_message status = 'warning' else: # Provide a warning message if a tool_dependencies.xml file is provided, but tool dependencies weren't loaded due to a requirement tag mismatch # or some other problem. Tool dependency definitions can define orphan tool dependencies (no relationship to any tools contained in the repository), # so warning messages are important because orphans are always valid.
The repository owner must be warned in case they did not intend to define an # orphan dependency, but simply provided incorrect information (tool shed, name, owner, changeset_revision) for the definition. orphan_message = tool_dependency_util.generate_message_for_orphan_tool_dependencies( trans, repository, metadata_dict ) if orphan_message: message += orphan_message status = 'warning' # Handle messaging for invalid tool dependencies. invalid_tool_dependencies_message = tool_dependency_util.generate_message_for_invalid_tool_dependencies( metadata_dict ) if invalid_tool_dependencies_message: message += invalid_tool_dependencies_message status = 'error' # Handle messaging for invalid repository dependencies. invalid_repository_dependencies_message = repository_dependency_util.generate_message_for_invalid_repository_dependencies( metadata_dict ) if invalid_repository_dependencies_message: message += invalid_repository_dependencies_message status = 'error' # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file. tool_util.reset_tool_data_tables( trans.app ) if uploaded_directory: suc.remove_dir( uploaded_directory ) trans.response.send_redirect( web.url_for( controller='repository', action='browse_repository', id=repository_id, commit_message='Deleted selected files', message=message, status=status ) ) else: if uploaded_directory: suc.remove_dir( uploaded_directory ) status = 'error' # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file. tool_util.reset_tool_data_tables( trans.app )
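# ---------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original controller): the chunked url-to-tempfile pattern used
# by the url branch of upload() above, extracted as a standalone Python 2 helper. The helper name
# fetch_url_to_tempfile and the CHUNK_SIZE value are assumptions for this example; the controller
# itself uses util.CHUNK_SIZE.
import os
import tempfile
import urllib

CHUNK_SIZE = 2 ** 16  # assumed value for the example

def fetch_url_to_tempfile( url ):
    # Stream a remote file to a local temporary file in fixed-size chunks and return its path,
    # so an arbitrarily large upload is never held in memory all at once.
    stream = urllib.urlopen( url )
    fd, tmp_name = tempfile.mkstemp()
    # Unlike the controller above, wrap the descriptor returned by mkstemp with os.fdopen() so the
    # descriptor is not leaked by opening a second file object on the same path.
    tmp_file = os.fdopen( fd, 'wb' )
    try:
        while 1:
            chunk = stream.read( CHUNK_SIZE )
            if not chunk:
                break
            tmp_file.write( chunk )
    finally:
        tmp_file.close()
        stream.close()
    return tmp_name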