Example #1
 def __init__( self, root_dir=None, config=None ):
     self.tool_sheds = odict()
     self.tool_sheds_auth = odict()
     if root_dir and config:
         # Parse tool_sheds_conf.xml
         tree, error_message = xml_util.parse_xml( config )
         if tree is None:
             log.warning( "Unable to load references to tool sheds defined in file %s" % str( config ) )
         else:
             root = tree.getroot()
             log.debug( 'Loading references to tool sheds from %s' % config )
             for elem in root.findall( 'tool_shed' ):
                 try:
                     name = elem.get( 'name', None )
                     url = elem.get( 'url', None )
                     username = elem.get( 'user', None )
                     password = elem.get( 'pass', None )
                     if name and url:
                         self.tool_sheds[ name ] = url
                         self.tool_sheds_auth[ name ] = None
                         log.debug( 'Loaded reference to tool shed: %s' % name )
                     if name and url and username and password:
                         pass_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
                         pass_mgr.add_password( None, url, username, password )
                         self.tool_sheds_auth[ name ] = pass_mgr
                 except Exception, e:
                     log.warning( 'Error loading reference to tool shed "%s", problem: %s' % ( name, str( e ) ) )
Example #2
 def __init__( self, app, xml_filename=None ):
     self.app = app
     self.data_managers = odict()
     self.managed_data_tables = odict()
     self.tool_path = None
     self.filename = xml_filename or self.app.config.data_manager_config_file
     self.load_from_xml( self.filename )
     if self.app.config.shed_data_manager_config_file:
         self.load_from_xml( self.app.config.shed_data_manager_config_file, store_tool_path=False, replace_existing=True )
Example #3
 def parse_outputs(self, tool):
     outputs = self.root_dict.get("outputs", {})
     output_defs = []
     for name, output_dict in outputs.items():
         output_defs.append(self._parse_output(tool, name, output_dict))
     outputs = odict()
     for output in output_defs:
         outputs[output.name] = output
     # TODO: parse outputs collections
     return outputs, odict()
Example #4
 def get_inital_values( self, data, trans ):
     if self.other_values:
         rval = odict( self.other_values )
     else:
         rval = odict()
     rval.update( { 'BASE_URL': trans.request.base, 'APP': trans.app } ) #trans automatically appears as a response, need to add properties of trans that we want here
     for key, value in  BASE_PARAMS.iteritems(): #add helper functions/variables
         rval[ key ] = value
     rval[ DEFAULT_DATASET_NAME ] = data #always have the display dataset name available
     return rval
Example #5
 def parse_outputs(self, tool):
     output_instances = self.tool_proxy.output_instances()
     outputs = odict()
     output_defs = []
     for output_instance in output_instances:
         output_defs.append(self._parse_output(tool, output_instance))
     # TODO: parse outputs collections
     for output_def in output_defs:
         outputs[output_def.name] = output_def
     return outputs, odict()
Example #6
def check_for_missing_tools( app, tool_panel_configs, latest_tool_migration_script_number ):
    # Get the 000x_tools.xml file associated with the current migrate_tools version number.
    tools_xml_file_path = os.path.abspath( os.path.join( 'scripts', 'migrate_tools', '%04d_tools.xml' % latest_tool_migration_script_number ) )
    # Parse the XML and load the file attributes for later checking against the proprietary tool_panel_config.
    migrated_tool_configs_dict = odict()
    tree, error_message = xml_util.parse_xml( tools_xml_file_path )
    if tree is None:
        return False, odict()
    root = tree.getroot()
    tool_shed = root.get( 'name' )
    tool_shed_url = get_tool_shed_url_from_tools_xml_file_path( app, tool_shed )
    # The default behavior is that the tool shed is down.
    tool_shed_accessible = False
    missing_tool_configs_dict = odict()
    if tool_shed_url:
        for elem in root:
            if elem.tag == 'repository':
                tool_dependencies = []
                tool_dependencies_dict = {}
                repository_name = elem.get( 'name' )
                changeset_revision = elem.get( 'changeset_revision' )
                url = '%s/repository/get_tool_dependencies?name=%s&owner=%s&changeset_revision=%s&from_install_manager=True' % \
                ( tool_shed_url, repository_name, REPOSITORY_OWNER, changeset_revision )
                try:
                    text = tool_shed_get( app, tool_shed_url, url )
                    tool_shed_accessible = True
                except Exception, e:
                    # Tool shed may be unavailable - we have to set tool_shed_accessible since we're looping.
                    tool_shed_accessible = False
                    print "The URL\n%s\nraised the exception:\n%s\n" % ( url, str( e ) )
                if tool_shed_accessible:
                    if text:
                        tool_dependencies_dict = encoding_util.tool_shed_decode( text )
                        for dependency_key, requirements_dict in tool_dependencies_dict.items():
                            tool_dependency_name = requirements_dict[ 'name' ]
                            tool_dependency_version = requirements_dict[ 'version' ]
                            tool_dependency_type = requirements_dict[ 'type' ]
                            tool_dependency_readme = requirements_dict.get( 'readme', '' )
                            tool_dependencies.append( ( tool_dependency_name, tool_dependency_version, tool_dependency_type, tool_dependency_readme ) )
                    for tool_elem in elem.findall( 'tool' ):
                        migrated_tool_configs_dict[ tool_elem.get( 'file' ) ] = tool_dependencies
        if tool_shed_accessible:
            # Parse the proprietary tool_panel_configs (the default is tool_conf.xml) and generate the list of missing tool config file names.
            for tool_panel_config in tool_panel_configs:
                tree, error_message = xml_util.parse_xml( tool_panel_config )
                if tree:
                    root = tree.getroot()
                    for elem in root:
                        if elem.tag == 'tool':
                            missing_tool_configs_dict = check_tool_tag_set( elem, migrated_tool_configs_dict, missing_tool_configs_dict )
                        elif elem.tag == 'section':
                            for section_elem in elem:
                                if section_elem.tag == 'tool':
                                    missing_tool_configs_dict = check_tool_tag_set( section_elem, migrated_tool_configs_dict, missing_tool_configs_dict )
    return tool_shed_accessible, missing_tool_configs_dict
Example #7
 def __init__(self, app, xml_filename=None):
     self.app = app
     self.data_managers = odict()
     self.managed_data_tables = odict()
     self.tool_path = None
     self._reload_count = 0
     self.filename = xml_filename or self.app.config.data_manager_config_file
     for filename in util.listify(self.filename):
         if not filename:
             continue
         self.load_from_xml(filename)
     if self.app.config.shed_data_manager_config_file:
         self.load_from_xml(self.app.config.shed_data_manager_config_file, store_tool_path=True)
Example #8
def check_for_missing_tools( app, tool_panel_configs, latest_tool_migration_script_number ):
    # Get the 000x_tools.xml file associated with the current migrate_tools version number.
    tools_xml_file_path = os.path.abspath( os.path.join( 'scripts', 'migrate_tools', '%04d_tools.xml' % latest_tool_migration_script_number ) )
    # Parse the XML and load the file attributes for later checking against the proprietary tool_panel_config.
    migrated_tool_configs_dict = odict()
    tree = util.parse_xml( tools_xml_file_path )
    root = tree.getroot()
    tool_shed = root.get( 'name' )
    tool_shed_url = get_tool_shed_url_from_tools_xml_file_path( app, tool_shed )
    if tool_shed_url:
        for elem in root:
            if elem.tag == 'repository':
                tool_dependencies = []
                tool_dependencies_dict = {}
                repository_name = elem.get( 'name' )
                changeset_revision = elem.get( 'changeset_revision' )
                url = '%s/repository/get_tool_dependencies?name=%s&owner=%s&changeset_revision=%s&webapp=install_manager&no_reset=true' % \
                ( tool_shed_url, repository_name, REPOSITORY_OWNER, changeset_revision )
                response = urllib2.urlopen( url )
                text = response.read()
                response.close()
                if text:
                    tool_dependencies_dict = tool_shed_decode( text )
                    for dependency_key, requirements_dict in tool_dependencies_dict.items():
                        tool_dependency_name = requirements_dict[ 'name' ]
                        tool_dependency_version = requirements_dict[ 'version' ]
                        tool_dependency_type = requirements_dict[ 'type' ]
                        tool_dependency_readme = requirements_dict.get( 'readme', '' )
                        tool_dependencies.append( ( tool_dependency_name, tool_dependency_version, tool_dependency_type, tool_dependency_readme ) )
                for tool_elem in elem.findall( 'tool' ):
                    migrated_tool_configs_dict[ tool_elem.get( 'file' ) ] = tool_dependencies
        # Parse the proprietary tool_panel_configs (the default is tool_conf.xml) and generate the list of missing tool config file names.
        missing_tool_configs_dict = odict()
        for tool_panel_config in tool_panel_configs:
            tree = util.parse_xml( tool_panel_config )
            root = tree.getroot()
            for elem in root:
                if elem.tag == 'tool':
                    missing_tool_configs_dict = check_tool_tag_set( elem, migrated_tool_configs_dict, missing_tool_configs_dict )
                elif elem.tag == 'section':
                    for section_elem in elem:
                        if section_elem.tag == 'tool':
                            missing_tool_configs_dict = check_tool_tag_set( section_elem, migrated_tool_configs_dict, missing_tool_configs_dict )
    else:
        exception_msg = '\n\nThe entry for the main Galaxy tool shed at %s is missing from the %s file.  ' % ( tool_shed, app.config.tool_sheds_config )
        exception_msg += 'The entry for this tool shed must always be available in this file, so re-add it before attempting to start your Galaxy server.\n'
        raise Exception( exception_msg )  
    return missing_tool_configs_dict
Example #9
def _expand_raw_config(ctx, config, path, name=None):
    name_input = name
    if "name" not in config:
        config["name"] = name
    if config["name"] is None:
        config["name"] = path_to_repo_name(path)

    default_include = config.get("include", ["**"])
    repos = config.get("repositories", None)
    auto_tool_repos = config.get("auto_tool_repositories", False)
    suite_config = config.get("suite", False)

    if repos and auto_tool_repos:
        raise Exception(AUTO_REPO_CONFLICT_MESSAGE)
    if auto_tool_repos and name_input:
        raise Exception(AUTO_NAME_CONFLICT_MESSAGE)
    if auto_tool_repos:
        repos = _build_auto_tool_repos(ctx, path, config, auto_tool_repos)
    if suite_config:
        if repos is None:
            repos = odict.odict()
        _build_suite_repo(config, repos, suite_config)
    # If repositories aren't defined, just define a single
    # one based on calculated name and including everything
    # by default.
    if repos is None:
        repos = {config["name"]: {"include": default_include}}
    config["repositories"] = repos
Example #10
    def __init__(self,
                 name,
                 structure,
                 label=None,
                 filters=None,
                 hidden=False,
                 default_format="data",
                 default_format_source=None,
                 default_metadata_source=None,
                 inherit_format=False,
                 inherit_metadata=False):
        super(ToolOutputCollection, self).__init__(name,
                                                   label=label,
                                                   filters=filters,
                                                   hidden=hidden)
        self.collection = True
        self.default_format = default_format
        self.structure = structure
        self.outputs = odict()

        self.inherit_format = inherit_format
        self.inherit_metadata = inherit_metadata

        self.metadata_source = default_metadata_source
        self.format_source = default_format_source
        self.change_format = []  # TODO
Example #11
def _build_auto_tool_repos(path, config, auto_tool_repos):
    default_include = config.get("include", ["**"])
    tool_els = list(load_tool_elements_from_path(path, recursive=True))
    paths = list(map(lambda pair: pair[0], tool_els))
    excludes = _shed_config_excludes(config)

    def _build_repository(tool_path, tool_el):
        tool_id = tool_el.getroot().get("id")
        tool_name = tool_el.getroot().get("name")
        template_vars = dict(
            tool_id=tool_id,
            tool_name=tool_name,
        )
        other_paths = paths[:]
        other_paths.remove(tool_path)
        tool_excludes = excludes + list(other_paths)
        repo_dict = {
            "include": default_include,
            "exclude": tool_excludes,
        }
        for key in ["name", "description", "long_description"]:
            template_key = "%s_template" % key
            template = auto_tool_repos.get(template_key, None)
            if template:
                value = templates.render(template, **template_vars)
                repo_dict[key] = value
        return repo_dict

    repos = odict.odict()
    for tool_path, tool_el in tool_els:
        repository_config = _build_repository(tool_path, tool_el)
        repository_name = repository_config["name"]
        repos[repository_name] = repository_config
    return repos
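A hypothetical illustration of the *_template expansion performed above; the real templates.render is assumed to substitute tool_id and tool_name into each template, with str.format standing in for it here:

auto_tool_repos = {
    "name_template": "{tool_id}",
    "description_template": "Galaxy wrapper for {tool_name}",
}
template_vars = {"tool_id": "seqtk_seq", "tool_name": "Convert FASTQ to FASTA"}
repo_dict = {"include": ["**"], "exclude": []}
for key in ["name", "description", "long_description"]:
    template = auto_tool_repos.get("%s_template" % key)
    if template:
        repo_dict[key] = template.format(**template_vars)  # stand-in for templates.render
print(repo_dict["name"])         # seqtk_seq
print(repo_dict["description"])  # Galaxy wrapper for Convert FASTQ to FASTA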
Example #12
 def writable_files( self, dataset=None ):
     files = odict()
     if self.composite_type != 'auto_primary_file':
         files[ self.primary_file_name ] = self.__new_composite_file( self.primary_file_name )
     for key, value in self.get_composite_files( dataset=dataset ).iteritems():
         files[ key ] = value
     return files
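For context, a toy sketch (invented file names) of the ordered mapping writable_files builds for a composite datatype: the primary file is registered first, then each declared composite file, and the insertion order is preserved:

from collections import OrderedDict  # stand-in for odict

files = OrderedDict()
files['Log'] = '<primary composite file>'        # primary_file_name goes in first
files['Sequences'] = '<declared composite file>'
files['Roadmaps'] = '<declared composite file>'
assert list(files)[0] == 'Log'  # consumers can rely on the primary file coming first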
Example #13
 def review_tool_migration_stages( self, trans, **kwd ):
     message = escape( galaxy.util.restore_text( kwd.get( 'message', '' ) ) )
     status = galaxy.util.restore_text( kwd.get( 'status', 'done' ) )
     migration_stages_dict = odict()
     migration_modules = []
     migration_scripts_dir = os.path.abspath( os.path.join( trans.app.config.root, 'lib', 'tool_shed', 'galaxy_install', 'migrate', 'versions' ) )
     migration_scripts_dir_contents = os.listdir( migration_scripts_dir )
     for item in migration_scripts_dir_contents:
         if os.path.isfile( os.path.join( migration_scripts_dir, item ) ) and item.endswith( '.py' ):
             module = item.replace( '.py', '' )
             migration_modules.append( module )
     if migration_modules:
         migration_modules.sort()
         # Remove the 0001_tools.py script since it is the seed.
         migration_modules = migration_modules[ 1: ]
         # Reverse the list so viewing will be newest to oldest.
         migration_modules.reverse()
     for migration_module in migration_modules:
         migration_stage = int( migration_module.replace( '_tools', '' ) )
         repo_name_dependency_tups = self.check_for_tool_dependencies( trans, migration_stage )
         open_file_obj, file_name, description = imp.find_module( migration_module, [ migration_scripts_dir ] )
         imported_module = imp.load_module( 'upgrade', open_file_obj, file_name, description )
         migration_info = imported_module.__doc__
         open_file_obj.close()
         migration_stages_dict[ migration_stage ] = ( migration_info, repo_name_dependency_tups )
     return trans.fill_template( 'admin/review_tool_migration_stages.mako',
                                 migration_stages_dict=migration_stages_dict,
                                 message=message,
                                 status=status )
Example #14
 def find_files(self, collection, dataset_collectors):
     filenames = odict.odict()
     for path, extra_file_collector in walk_over_extra_files(
         dataset_collectors, self.job_working_directory, collection
     ):
         filenames[path] = extra_file_collector
     return filenames
Example #15
def __main__():
    # Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-a', '--algorithm', dest='algorithms', action='append', type="string", help='Algorithms to use, eg. (md5, sha1, sha224, sha256, sha384, sha512)' )
    parser.add_option( '-i', '--input', dest='input', action='store', type="string", help='Input filename' )
    parser.add_option( '-o', '--output', dest='output', action='store', type="string", help='Output filename' )
    (options, args) = parser.parse_args()

    algorithms = odict()
    for algorithm in options.algorithms:
        assert algorithm in HASH_ALGORITHMS, "Invalid algorithm specified: %s" % ( algorithm )
        assert algorithm not in algorithms, "Specify each algorithm only once."
        algorithms[ algorithm ] = hashlib.new( algorithm )
    assert options.algorithms, "You must provide at least one algorithm."
    assert options.input, "You must provide an input filename."
    assert options.output, "You must provide an output filename."

    input = open( options.input )
    while True:
        chunk = input.read( CHUNK_SIZE )
        if chunk:
            for algorithm in algorithms.itervalues():
                algorithm.update( chunk )
        else:
            break

    output = open( options.output, 'wb' )
    output.write( '#%s\n' % ( '\t'.join( algorithms.keys() ) ) )
    output.write( '%s\n' % ( '\t'.join( map( lambda x: x.hexdigest(), algorithms.values() ) ) ) )
    output.close()
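As a sanity check on the output format, a short runnable sketch of what one output row represents, assuming an invocation along the lines of `-a md5 -a sha1 -i input.txt -o output.tsv` (file names hypothetical):

import hashlib

data = b"hello world\n"
algorithms = ['md5', 'sha1']
digests = [hashlib.new(name, data).hexdigest() for name in algorithms]
print('#' + '\t'.join(algorithms))   # header row of algorithm names
print('\t'.join(digests))            # one tab-separated digest per algorithm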
Example #16
def __main__():
    input_filename = sys.argv[1]
    output_filename = sys.argv[2]
    species = odict()
    cur_size = 0
    for components in iter_fasta_alignment(input_filename):
        species_not_written = species.keys()
        for component in components:
            if component.species not in species:
                species[component.species] = tempfile.TemporaryFile()
                species[component.species].write("-" * cur_size)
            species[component.species].write(component.text)
            try:
                species_not_written.remove(component.species)
            except ValueError:
                #this is a new species
                pass
        for spec in species_not_written:
            species[spec].write("-" * len(components[0].text))
        cur_size += len(components[0].text)
    out = open(output_filename, 'wb')
    for spec, f in species.iteritems():
        f.seek(0)
        out.write(">%s\n%s\n" % (spec, f.read()))
    out.close()
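A toy illustration of the padding logic above (block contents invented): species absent from an alignment block are padded with '-' so every concatenated sequence keeps the same length:

blocks = [
    {"mouse": "ACGT", "human": "ACGA"},  # block 1: both species present
    {"mouse": "TTGG"},                   # block 2: human is missing
]
species = {}
cur_size = 0
for block in blocks:
    width = len(next(iter(block.values())))
    for name in set(species) | set(block):
        text = block.get(name)
        species.setdefault(name, "-" * cur_size)  # new species: pad everything seen so far
        species[name] += text if text else "-" * width
    cur_size += width
assert species["mouse"] == "ACGTTTGG"
assert species["human"] == "ACGA----"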
Example #17
 def get_display_applications_by_dataset( self, dataset, trans ):
     rval = odict()
     for key, value in self.display_applications.iteritems():
         value = value.filter_by_dataset( dataset, trans )
         if value.links:
             rval[key] = value
     return rval
Example #18
def get_previous_repository_reviews(app, repository, changeset_revision):
    """
    Return an ordered dictionary of repository reviews up to and including the
    received changeset revision.
    """
    repo = hg_util.get_repo_for_repository(app,
                                           repository=repository,
                                           repo_path=None,
                                           create=False)
    reviewed_revision_hashes = [
        review.changeset_revision for review in repository.reviews
    ]
    previous_reviews_dict = odict()
    for changeset in hg_util.reversed_upper_bounded_changelog(
            repo, changeset_revision):
        previous_changeset_revision = str(repo.changectx(changeset))
        if previous_changeset_revision in reviewed_revision_hashes:
            previous_rev, previous_changeset_revision_label = \
                hg_util.get_rev_label_from_changeset_revision( repo, previous_changeset_revision )
            revision_reviews = get_reviews_by_repository_id_changeset_revision(
                app, app.security.encode_id(repository.id),
                previous_changeset_revision)
            previous_reviews_dict[ previous_changeset_revision ] = \
                dict( changeset_revision_label=previous_changeset_revision_label,
                      reviews=revision_reviews )
    return previous_reviews_dict
Example #19
 def __init__(self,
              config_element,
              tool_data_path,
              from_shed_config=False,
              filename=None,
              tool_data_path_files=None):
     self.name = config_element.get('name')
     self.comment_char = config_element.get('comment_char')
     self.empty_field_value = config_element.get('empty_field_value', '')
     self.empty_field_values = {}
     self.allow_duplicate_entries = util.asbool(
         config_element.get('allow_duplicate_entries', True))
     self.here = filename and os.path.dirname(filename)
     self.filenames = odict()
     self.tool_data_path = tool_data_path
     self.tool_data_path_files = tool_data_path_files
     self.missing_index_file = None
     # increment this variable any time a new entry is added, or when the table is totally reloaded
     # This value has no external meaning, and does not represent an abstract version of the underlying data
     self._loaded_content_version = 1
     self._load_info = ([config_element, tool_data_path], {
         'from_shed_config': from_shed_config,
         'tool_data_path_files': self.tool_data_path_files
     })
     self._merged_load_info = []
Example #20
    def __recursively_create_collections_for_elements(self, trans, elements, hide_source_items, copy_elements):
        if elements is self.ELEMENTS_UNINITIALIZED:
            return

        new_elements = odict.odict()
        for key, element in elements.items():
            if isinstance(element, model.DatasetCollection):
                continue

            if element.get("src", None) != "new_collection":
                continue

            # element is a dict with "src" set to "new_collection" and
            # an odict of named elements
            collection_type = element.get("collection_type", None)
            sub_elements = element["elements"]
            collection = self.create_dataset_collection(
                trans=trans,
                collection_type=collection_type,
                elements=sub_elements,
                hide_source_items=hide_source_items,
                copy_elements=copy_elements
            )
            new_elements[key] = collection
        elements.update(new_elements)
Example #21
 def writable_files( self, dataset = None ):
     files = odict()
     if self.composite_type != 'auto_primary_file':
         files[ self.primary_file_name ] = self.__new_composite_file( self.primary_file_name )
     for key, value in self.get_composite_files( dataset = dataset ).iteritems():
         files[ key ] = value
     return files
Example #22
 def __init__( self, display_application ):
     self.display_application = display_application
     self.parameters = odict() #parameters are populated in order, allowing lower listed ones to have values of higher listed ones
     self.url_param_name_map = {}
     self.url = None
     self.id = None
     self.name = None
Example #23
def _expand_raw_config(config, path, name=None):
    name_input = name
    if "name" not in config:
        config["name"] = name
    if config["name"] is None:
        config["name"] = path_to_repo_name(path)

    default_include = config.get("include", ["**"])
    repos = config.get("repositories", None)
    auto_tool_repos = config.get("auto_tool_repositories", False)
    suite_config = config.get("suite", False)

    if repos and auto_tool_repos:
        raise Exception(AUTO_REPO_CONFLICT_MESSAGE)
    if auto_tool_repos and name_input:
        raise Exception(AUTO_NAME_CONFLICT_MESSAGE)
    if auto_tool_repos:
        repos = _build_auto_tool_repos(path, config, auto_tool_repos)
    if suite_config:
        if repos is None:
            repos = odict.odict()
        _build_suite_repo(config, repos, suite_config)
    # If repositories aren't defined, just define a single
    # one based on calculated name and including everything
    # by default.
    if repos is None:
        repos = {
            config["name"]: {
                "include": default_include
            }
        }
    config["repositories"] = repos
Example #24
def __main__():
    input_filename = sys.argv[1]
    output_filename = sys.argv[2]
    species = odict()
    cur_size = 0
    for components in iter_fasta_alignment( input_filename ):
        species_not_written = species.keys()
        for component in components:
            if component.species not in species:
                species[component.species] = tempfile.TemporaryFile()
                species[component.species].write( "-" * cur_size )
            species[component.species].write( component.text )
            try:
                species_not_written.remove( component.species )
            except ValueError:
                #this is a new species
                pass
        for spec in species_not_written:
            species[spec].write( "-" * len( components[0].text ) )
        cur_size += len( components[0].text )
    out = open( output_filename, 'wb' )
    for spec, f in species.iteritems():
        f.seek( 0 )
        out.write( ">%s\n%s\n" % ( spec, f.read() ) )
    out.close()
Example #25
    def __init__(self,
                 app,
                 directories_setting=None,
                 skip_bad_plugins=True,
                 **kwargs):
        """
        Set up the manager and load all plugins.

        :type   app:    UniverseApplication
        :param  app:    the application (and its configuration) using this manager
        :type   directories_setting: string (default: None)
        :param  directories_setting: the filesystem path (or paths)
            to search for plugins. Can be a CSV string of paths. A path is treated as
            absolute if it starts with '/', relative otherwise.
        :type   skip_bad_plugins:    boolean (default: True)
        :param  skip_bad_plugins:    whether to skip plugins that cause
            exceptions when loaded or to raise that exception
        """
        self.directories = []
        self.skip_bad_plugins = skip_bad_plugins
        self.plugins = odict.odict()

        self.directories = util.config_directories_from_setting(
            directories_setting, app.config.root)

        self.load_configuration()
        self.load_plugins()
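An illustration (assumed behavior, matching the docstring above) of how a CSV directories_setting would resolve against the application root:

directories_setting = "config/plugins/visualizations,/opt/galaxy/extra_plugins"
root = "/srv/galaxy"
resolved = [p if p.startswith("/") else "%s/%s" % (root, p)
            for p in directories_setting.split(",")]
print(resolved)
# ['/srv/galaxy/config/plugins/visualizations', '/opt/galaxy/extra_plugins']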
Example #26
def _build_auto_tool_repos(path, config, auto_tool_repos):
    default_include = config.get("include", ["**"])
    tool_els = list(load_tool_elements_from_path(path, recursive=True))
    paths = list(map(lambda pair: pair[0], tool_els))
    excludes = _shed_config_excludes(config)

    def _build_repository(tool_path, tool_el):
        tool_id = tool_el.getroot().get("id")
        tool_name = tool_el.getroot().get("name")
        template_vars = dict(
            tool_id=tool_id,
            tool_name=tool_name,
        )
        other_paths = paths[:]
        other_paths.remove(tool_path)
        tool_excludes = excludes + list(other_paths)
        repo_dict = {
            "include": default_include,
            "exclude": tool_excludes,
        }
        for key in ["name", "description", "long_description"]:
            template_key = "%s_template" % key
            template = auto_tool_repos.get(template_key, None)
            if template:
                value = templates.render(template, **template_vars)
                repo_dict[key] = value
        return repo_dict

    repos = odict.odict()
    for tool_path, tool_el in tool_els:
        repository_config = _build_repository(tool_path, tool_el)
        repository_name = repository_config["name"]
        repos[repository_name] = repository_config
    return repos
Example #27
 def __init__(self, workflow_invocation, inputs_by_step_id, module_injector, jobs_per_scheduling_iteration=-1):
     self.outputs = odict()
     self.module_injector = module_injector
     self.workflow_invocation = workflow_invocation
     self.inputs_by_step_id = inputs_by_step_id
     self.jobs_per_scheduling_iteration = jobs_per_scheduling_iteration
     self.jobs_scheduled_this_iteration = 0
Example #28
def get_job_dict( trans ):
    """
    Return a dictionary of Job -> [ Dataset ] mappings, for all finished
    active Datasets in the current history and the jobs that created them.
    """
    history = trans.get_history()
    # Get the jobs that created the datasets
    warnings = set()
    jobs = odict()
    for dataset in history.active_datasets:
        # FIXME: Create "Dataset.is_finished"
        if dataset.state in ( 'new', 'running', 'queued' ):
            warnings.add( "Some datasets still queued or running were ignored" )
            continue
        
        # if this HDA was copied from another, we need to find the job that created the original HDA
        job_hda = dataset
        while job_hda.copied_from_history_dataset_association:
            job_hda = job_hda.copied_from_history_dataset_association
        
        if not job_hda.creating_job_associations:
            jobs[ FakeJob( dataset ) ] = [ ( None, dataset ) ]
        
        for assoc in job_hda.creating_job_associations:
            job = assoc.job
            if job in jobs:
                jobs[ job ].append( ( assoc.name, dataset ) )
            else:
                jobs[ job ] = [ ( assoc.name, dataset ) ]
    return jobs, warnings    
Example #29
 def review_tool_migration_stages( self, trans, **kwd ):
     message = escape( galaxy.util.restore_text( kwd.get( 'message', '' ) ) )
     status = galaxy.util.restore_text( kwd.get( 'status', 'done' ) )
     migration_stages_dict = odict()
     migration_modules = []
     migration_scripts_dir = os.path.abspath( os.path.join( trans.app.config.root, 'lib', 'tool_shed', 'galaxy_install', 'migrate', 'versions' ) )
     migration_scripts_dir_contents = os.listdir( migration_scripts_dir )
     for item in migration_scripts_dir_contents:
         if os.path.isfile( os.path.join( migration_scripts_dir, item ) ) and item.endswith( '.py' ):
             module = item.replace( '.py', '' )
             migration_modules.append( module )
     if migration_modules:
         migration_modules.sort()
         # Remove the 0001_tools.py script since it is the seed.
         migration_modules = migration_modules[ 1: ]
         # Reverse the list so viewing will be newest to oldest.
         migration_modules.reverse()
     for migration_module in migration_modules:
         migration_stage = int( migration_module.replace( '_tools', '' ) )
         repo_name_dependency_tups = self.check_for_tool_dependencies( trans, migration_stage )
         open_file_obj, file_name, description = imp.find_module( migration_module, [ migration_scripts_dir ] )
         imported_module = imp.load_module( 'upgrade', open_file_obj, file_name, description )
         migration_info = imported_module.__doc__
         open_file_obj.close()
         migration_stages_dict[ migration_stage ] = ( migration_info, repo_name_dependency_tups )
     return trans.fill_template( 'admin/review_tool_migration_stages.mako',
                                 migration_stages_dict=migration_stages_dict,
                                 message=message,
                                 status=status )
Example #30
def __main__():
    # Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-a', '--algorithm', dest='algorithms', action='append', type="string", help='Algorithms to use, eg. (md5, sha1, sha224, sha256, sha384, sha512)' )
    parser.add_option( '-i', '--input', dest='input', action='store', type="string", help='Input filename' )
    parser.add_option( '-o', '--output', dest='output', action='store', type="string", help='Output filename' )
    (options, args) = parser.parse_args()

    algorithms = odict()
    for algorithm in options.algorithms:
        assert algorithm in HASH_ALGORITHMS, "Invalid algorithm specified: %s" % ( algorithm )
        assert algorithm not in algorithms, "Specify each algorithm only once."
        algorithms[ algorithm ] = hashlib.new( algorithm )
    assert options.algorithms, "You must provide at least one algorithm."
    assert options.input, "You must provide an input filename."
    assert options.output, "You must provide an output filename."

    input = open( options.input )
    while True:
        chunk = input.read( CHUNK_SIZE )
        if chunk:
            for algorithm in algorithms.values():
                algorithm.update( chunk )
        else:
            break

    output = open( options.output, 'wb' )
    output.write( '#%s\n' % ( '\t'.join( algorithms.keys() ) ) )
    output.write( '%s\n' % ( '\t'.join( x.hexdigest() for x in algorithms.values() ) ) )
    output.close()
Example #31
 def get_display_applications_by_dataset( self, dataset, trans ):
     rval = odict()
     for key, value in self.display_applications.iteritems():
         value = value.filter_by_dataset( dataset, trans )
         if value.links:
             rval[key] = value
     return rval
Example #32
    def __init__(
        self,
        name,
        structure,
        label=None,
        filters=None,
        hidden=False,
        default_format="data",
        default_format_source=None,
        default_metadata_source=None,
        inherit_format=False,
        inherit_metadata=False
    ):
        super( ToolOutputCollection, self ).__init__( name, label=label, filters=filters, hidden=hidden )
        self.collection = True
        self.default_format = default_format
        self.structure = structure
        self.outputs = odict()

        self.inherit_format = inherit_format
        self.inherit_metadata = inherit_metadata

        self.metadata_source = default_metadata_source
        self.format_source = default_format_source
        self.change_format = []  # TODO
Example #33
    def __recursively_create_collections_for_elements(self, trans, elements,
                                                      hide_source_items,
                                                      copy_elements):
        if elements is self.ELEMENTS_UNINITIALIZED:
            return

        new_elements = odict.odict()
        for key, element in elements.items():
            if isinstance(element, model.DatasetCollection):
                continue

            if element.get("src", None) != "new_collection":
                continue

            # element is a dict with "src" set to "new_collection" and
            # an odict of named elements
            collection_type = element.get("collection_type", None)
            sub_elements = element["elements"]
            collection = self.create_dataset_collection(
                trans=trans,
                collection_type=collection_type,
                elements=sub_elements,
                hide_source_items=hide_source_items,
                copy_elements=copy_elements)
            new_elements[key] = collection
        elements.update(new_elements)
Example #34
def create_job(trans, params, tool, json_file_path, data_list, folder=None, history=None, job_params=None):
    """
    Create the upload job.
    """
    job = trans.app.model.Job()
    galaxy_session = trans.get_galaxy_session()
    if type(galaxy_session) == trans.model.GalaxySession:
        job.session_id = galaxy_session.id
    if trans.user is not None:
        job.user_id = trans.user.id
    if folder:
        job.library_folder_id = folder.id
    else:
        if not history:
            history = trans.history
        job.history_id = history.id
    job.tool_id = tool.id
    job.tool_version = tool.version
    job.set_state(job.states.UPLOAD)
    trans.sa_session.add(job)
    trans.sa_session.flush()
    log.info('tool %s created job id %d' % (tool.id, job.id))
    trans.log_event('created job id %d' % job.id, tool_id=tool.id)

    for name, value in tool.params_to_strings(params, trans.app).items():
        job.add_parameter(name, value)
    job.add_parameter('paramfile', dumps(json_file_path))
    object_store_id = None
    for i, dataset in enumerate(data_list):
        if folder:
            job.add_output_library_dataset('output%i' % i, dataset)
        else:
            job.add_output_dataset('output%i' % i, dataset)
        # Create an empty file immediately
        if not dataset.dataset.external_filename:
            dataset.dataset.object_store_id = object_store_id
            try:
                trans.app.object_store.create(dataset.dataset)
            except ObjectInvalid:
                raise Exception('Unable to create output dataset: object store is full')
            object_store_id = dataset.dataset.object_store_id
            trans.sa_session.add(dataset)
            # open( dataset.file_name, "w" ).close()
    job.object_store_id = object_store_id
    job.set_state(job.states.NEW)
    job.set_handler(tool.get_job_handler(None))
    if job_params:
        for name, value in job_params.items():
            job.add_parameter(name, value)
    trans.sa_session.add(job)
    trans.sa_session.flush()

    # Queue the job for execution
    trans.app.job_queue.put(job.id, job.tool_id)
    trans.log_event("Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id)
    output = odict()
    for i, v in enumerate(data_list):
        output['output%i' % i] = v
    return job, output
Example #35
def create_job( trans, params, tool, json_file_path, data_list, folder=None, history=None, job_params=None ):
    """
    Create the upload job.
    """
    job = trans.app.model.Job()
    galaxy_session = trans.get_galaxy_session()
    if type( galaxy_session ) == trans.model.GalaxySession:
        job.session_id = galaxy_session.id
    if trans.user is not None:
        job.user_id = trans.user.id
    if folder:
        job.library_folder_id = folder.id
    else:
        if not history:
            history = trans.history
        job.history_id = history.id
    job.tool_id = tool.id
    job.tool_version = tool.version
    job.set_state( job.states.UPLOAD )
    trans.sa_session.add( job )
    trans.sa_session.flush()
    log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
    trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )

    for name, value in tool.params_to_strings( params, trans.app ).iteritems():
        job.add_parameter( name, value )
    job.add_parameter( 'paramfile', dumps( json_file_path ) )
    object_store_id = None
    for i, dataset in enumerate( data_list ):
        if folder:
            job.add_output_library_dataset( 'output%i' % i, dataset )
        else:
            job.add_output_dataset( 'output%i' % i, dataset )
        # Create an empty file immediately
        if not dataset.dataset.external_filename:
            dataset.dataset.object_store_id = object_store_id
            try:
                trans.app.object_store.create( dataset.dataset )
            except ObjectInvalid:
                raise Exception('Unable to create output dataset: object store is full')
            object_store_id = dataset.dataset.object_store_id
            trans.sa_session.add( dataset )
            # open( dataset.file_name, "w" ).close()
    job.object_store_id = object_store_id
    job.set_state( job.states.NEW )
    job.set_handler( tool.get_job_handler( None ) )
    if job_params:
        for name, value in job_params.iteritems():
            job.add_parameter( name, value )
    trans.sa_session.add( job )
    trans.sa_session.flush()

    # Queue the job for execution
    trans.app.job_queue.put( job.id, job.tool_id )
    trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
    output = odict()
    for i, v in enumerate( data_list ):
        output[ 'output%i' % i ] = v
    return job, output
Example #36
    def execute(self, tool, trans, *args, **kwargs):
        #
        # Get genome to index.
        #
        incoming = kwargs['incoming']
        #
        # Create the job and output dataset objects
        #
        job = trans.app.model.Job()
        job.tool_id = tool.id
        job.user_id = incoming['user']
        start_job_state = job.state  # should be job.states.NEW
        job.state = job.states.WAITING  # we need to set job state to something other than NEW, or else when tracking jobs in db it will be picked up before we have added input / output parameters
        trans.sa_session.add(job)

        # Create dataset that will serve as archive.
        temp_dataset = trans.app.model.Dataset(state=trans.app.model.Dataset.states.NEW)
        trans.sa_session.add(temp_dataset)

        trans.sa_session.flush()  # ensure job.id and temp_dataset.id are available
        trans.app.object_store.create(temp_dataset)  # set the object store id, create dataset (because galaxy likes having datasets)

        #
        # Setup job and job wrapper.
        #

        # Add association for keeping track of job, history, archive relationship.
        user = trans.sa_session.query(trans.app.model.User).get(int(incoming['user']))
        assoc = trans.app.model.GenomeIndexToolData(job=job, dataset=temp_dataset, fasta_path=incoming['path'],
                                                    indexer=incoming['indexer'], user=user,
                                                    deferred_job=kwargs['deferred'], transfer_job=kwargs['transfer'])
        trans.sa_session.add(assoc)

        job_wrapper = GenomeIndexToolWrapper(job)
        cmd_line = job_wrapper.setup_job(assoc)
        #
        # Add parameters to job_parameter table.
        #

        # Set additional parameters.
        incoming['__GENOME_INDEX_COMMAND__'] = cmd_line
        for name, value in tool.params_to_strings(incoming, trans.app).iteritems():
            job.add_parameter(name, value)

        job.state = start_job_state  # job inputs have been configured, restore initial job state
        trans.sa_session.flush()

        # Queue the job for execution
        trans.app.job_queue.put(job.id, tool)
        log.info("Added genome index job to the job queue, id: %s" %
                 str(job.id))

        return job, odict()
Example #37
 def manage_repository_reviews(self, trans, mine=False, **kwd):
     # The value of the received id is the encoded repository id.
     params = util.Params(kwd)
     message = util.restore_text(params.get('message', ''))
     status = params.get('status', 'done')
     repository_id = kwd.get('id', None)
     if repository_id:
         repository = suc.get_repository_in_tool_shed(trans, repository_id)
         repo_dir = repository.repo_path(trans.app)
         repo = hg.repository(suc.get_configured_ui(), repo_dir)
         metadata_revision_hashes = [
             metadata_revision.changeset_revision
             for metadata_revision in repository.metadata_revisions
         ]
         reviewed_revision_hashes = [
             review.changeset_revision for review in repository.reviews
         ]
         reviews_dict = odict()
         for changeset in suc.get_reversed_changelog_changesets(repo):
             ctx = repo.changectx(changeset)
             changeset_revision = str(ctx)
             if changeset_revision in metadata_revision_hashes or changeset_revision in reviewed_revision_hashes:
                 rev, changeset_revision_label = suc.get_rev_label_from_changeset_revision(
                     repo, changeset_revision)
                 if changeset_revision in reviewed_revision_hashes:
                     # Find the review for this changeset_revision
                     repository_reviews = suc.get_reviews_by_repository_id_changeset_revision(
                         trans, repository_id, changeset_revision)
                     # Determine if the current user can add a review to this revision.
                     can_add_review = trans.user not in [
                         repository_review.user
                         for repository_review in repository_reviews
                     ]
                     repository_metadata = suc.get_repository_metadata_by_changeset_revision(
                         trans, repository_id, changeset_revision)
                     if repository_metadata:
                         repository_metadata_reviews = util.listify(
                             repository_metadata.reviews)
                     else:
                         repository_metadata_reviews = []
                 else:
                     repository_reviews = []
                     repository_metadata_reviews = []
                     can_add_review = True
                 installable = changeset_revision in metadata_revision_hashes
                 revision_dict = dict(
                     changeset_revision_label=changeset_revision_label,
                     repository_reviews=repository_reviews,
                     repository_metadata_reviews=repository_metadata_reviews,
                     installable=installable,
                     can_add_review=can_add_review)
                 reviews_dict[changeset_revision] = revision_dict
     return trans.fill_template(
         '/webapps/community/repository_review/reviews_of_repository.mako',
         repository=repository,
         reviews_dict=reviews_dict,
         mine=mine,
         message=message,
         status=status)
Example #38
 def build_elements(self):
     elements = self._current_elements
     if self._nested_collection:
         new_elements = odict()
         for identifier, element in elements.items():
             new_elements[identifier] = element.build()
         elements = new_elements
     return elements
Example #39
 def get_inital_values(self, data, trans):
     if self.other_values:
         rval = odict(self.other_values)
     else:
         rval = odict()
     rval.update({'BASE_URL': trans.request.base, 'APP': trans.app})  # trans automatically appears as a response, need to add properties of trans that we want here
     for key, value in BASE_PARAMS.iteritems():  # add helper functions/variables
         rval[key] = value
     rval[DEFAULT_DATASET_NAME] = data  # always have the display dataset name available
     return rval
Example #40
 def __init__(self, display_id, name, datatypes_registry, version=None):
     self.id = display_id
     self.name = name
     self.datatypes_registry = datatypes_registry
     if version is None:
         version = "1.0.0"
     self.version = version
     self.links = odict()
Example #41
 def __init__( self, config_filename, root_dir, app ):
     self.all_external_service_types = odict()
     self.root_dir = root_dir
     self.app = app
     try:
         self.load_all( config_filename )
     except:
         log.exception( "ExternalServiceTypesCollection error reading %s", config_filename )
Example #42
 def __init__( self, config_filename, root_dir, app ):
     self.all_external_service_types = odict()
     self.root_dir = root_dir
     self.app = app
     try:
         self.load_all( config_filename )
     except:
         log.exception( "ExternalServiceTypesCollection error reading %s", config_filename )
Example #43
 def __init__(self, display_id, name, datatypes_registry, version=None):
     self.id = display_id
     self.name = name
     self.datatypes_registry = datatypes_registry
     if version is None:
         version = "1.0.0"
     self.version = version
     self.links = odict()
Example #44
 def __load_elements(self, trans, element_identifiers, hide_source_items=False, copy_elements=False):
     elements = odict.odict()
     for element_identifier in element_identifiers:
         elements[element_identifier["name"]] = self.__load_element(trans,
                                                                    element_identifier=element_identifier,
                                                                    hide_source_items=hide_source_items,
                                                                    copy_elements=copy_elements)
     return elements
Example #45
 def __init__( self, **kwd ):
     Bunch.__init__( self, **kwd )
     self.primary_file = None
     self.composite_files = odict()
     self.dbkey = None
     self.warnings = []
     
     self._temp_filenames = [] #store all created filenames here, delete on cleanup
Example #46
    def _build_elements_from_rule_data(self, collection_type_description,
                                       rule_set, data, sources,
                                       handle_dataset):
        identifier_columns = rule_set.identifier_columns
        elements = odict.odict()
        for data_index, row_data in enumerate(data):
            # For each row, find place in depth for this element.
            collection_type_at_depth = collection_type_description
            elements_at_depth = elements

            for i, identifier_column in enumerate(identifier_columns):
                identifier = row_data[identifier_column]

                if i + 1 == len(identifier_columns):
                    # At correct final position in nested structure for this dataset.
                    if collection_type_at_depth.collection_type == "paired":
                        if identifier.lower() in ["f", "1", "r1", "forward"]:
                            identifier = "forward"
                        elif identifier.lower() in ["r", "2", "r2", "reverse"]:
                            identifier = "reverse"
                        else:
                            raise Exception(
                                "Unknown indicator of paired status encountered - only values of F, R, 1, 2, R1, R2, forward, or reverse are allowed."
                            )

                    elements_at_depth[identifier] = handle_dataset(sources[data_index]["dataset"])
                else:
                    collection_type_at_depth = collection_type_at_depth.child_collection_type_description()
                    found = False
                    if identifier in elements_at_depth:
                        elements_at_depth = elements_at_depth[identifier]["elements"]
                        found = True

                    if not found:
                        sub_collection = {}
                        sub_collection["src"] = "new_collection"
                        sub_collection["collection_type"] = collection_type_at_depth.collection_type
                        sub_collection["elements"] = odict.odict()
                        elements_at_depth[identifier] = sub_collection
                        elements_at_depth = sub_collection["elements"]

        return elements
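A runnable toy flattening (invented rows, with handle_dataset replaced by the raw path) of the two-level case above, where the identifier columns pick the outer list identifier and then the pair slot:

rows = [
    ["sampleA", "R1", "a_1.fastq"],
    ["sampleA", "R2", "a_2.fastq"],
    ["sampleB", "F",  "b_1.fastq"],
    ["sampleB", "R",  "b_2.fastq"],
]
PAIRED = {"f": "forward", "1": "forward", "r1": "forward",
          "r": "reverse", "2": "reverse", "r2": "reverse"}
elements = {}
for sample, indicator, path in rows:
    pair = elements.setdefault(sample, {"src": "new_collection",
                                        "collection_type": "paired",
                                        "elements": {}})
    pair["elements"][PAIRED[indicator.lower()]] = path
assert elements["sampleA"]["elements"] == {"forward": "a_1.fastq", "reverse": "a_2.fastq"}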
Example #47
 def build_elements(self):
     elements = self._current_elements
     if self._nested_collection:
         new_elements = odict()
         for identifier, element in elements.items():
             new_elements[identifier] = element.build()
         elements = new_elements
     return elements
Example #48
 def __init__(self, display_application):
     self.display_application = display_application
      self.parameters = odict()  # parameters are populated in order, allowing lower listed ones to have values of higher listed ones
     self.url_param_name_map = {}
     self.url = None
     self.id = None
     self.name = None
Example #49
    def execute(
        self,
        tool,
        trans,
        incoming={},
        set_output_hid=False,
        overwrite=True,
        history=None,
        job_params=None,
        mapping_over_collection=False,
        execution_cache=None,
        **kwargs
    ):
        if execution_cache is None:
            execution_cache = ToolExecutionCache(trans)

        current_user_roles = execution_cache.current_user_roles
        history, inp_data, inp_dataset_collections = self._collect_inputs(
            tool, trans, incoming, history, current_user_roles
        )

        # Build name for output datasets based on tool name and input names
        on_text = self._get_on_text(inp_data)

        # wrapped params are used by change_format action and by output.label; only perform this wrapping once, as needed
        wrapped_params = self._wrapped_params(trans, tool, incoming)

        out_data = odict()
        input_collections = dict([(k, v[0][0]) for k, v in inp_dataset_collections.iteritems()])
        output_collections = OutputCollections(
            trans,
            history,
            tool=tool,
            tool_action=self,
            input_collections=input_collections,
            mapping_over_collection=mapping_over_collection,
            on_text=on_text,
            incoming=incoming,
            params=wrapped_params.params,
            job_params=job_params,
        )

        #
        # Create job.
        #
        job, galaxy_session = self._new_job_for_session(trans, tool, history)
        self._produce_outputs(trans, tool, out_data, output_collections, incoming=incoming, history=history)
        self._record_inputs(trans, tool, job, incoming, inp_data, inp_dataset_collections, current_user_roles)
        self._record_outputs(job, out_data, output_collections)
        job.state = job.states.OK
        trans.sa_session.add(job)
        trans.sa_session.flush()  # ensure job.id are available

        # Queue the job for execution
        # trans.app.job_queue.put( job.id, tool.id )
        # trans.log_event( "Added database job action to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
        log.info("Calling produce_outputs, tool is %s" % tool)
        return job, out_data
Example #50
    def __init__(self, config, config_dict, fsmon=False):
        """The default contructor. Extends `NestedObjectStore`."""
        super(HierarchicalObjectStore, self).__init__(config)

        backends = odict()
        for order, backend_def in enumerate(config_dict["backends"]):
            backends[order] = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)

        self.backends = backends
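A hypothetical config_dict for the constructor above; "backends" is a list in priority order, and each entry is a complete object-store configuration handed to build_object_store_from_config (the keys inside each backend are illustrative):

config_dict = {
    "backends": [
        {"type": "disk", "files_dir": "/data/primary"},
        {"type": "disk", "files_dir": "/data/overflow"},
    ],
}
# The resulting self.backends maps 0 -> the first store, 1 -> the second,
# preserving the priority order given in the configuration.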
Example #51
 def find_files(self, output_name, collection, dataset_collectors):
     filenames = odict.odict()
     for discovered_file in discover_files(output_name,
                                           self.tool_provided_metadata,
                                           dataset_collectors,
                                           self.job_working_directory,
                                           collection):
         filenames[discovered_file.path] = discovered_file
     return filenames
Example #52
 def __init__(self, providers=None):
     if providers:
         self.providers = providers
     else:
         self.providers = odict()
     self._banned_identifiers = [
         provider.op_endpoint_url for provider in self.providers.values()
         if provider.never_associate_with_user
     ]
Example #53
 def __init__(self):
     self.repository_types_by_label = odict()
     self.repository_types_by_label["unrestricted"] = unrestricted.Unrestricted()
     self.repository_types_by_label[
         "repository_suite_definition"
     ] = repository_suite_definition.RepositorySuiteDefinition()
     self.repository_types_by_label[
         "tool_dependency_definition"
     ] = tool_dependency_definition.ToolDependencyDefinition()
Example #54
    def __init__(self, config, config_dict, fsmon=False):
        """The default contructor. Extends `NestedObjectStore`."""
        super(HierarchicalObjectStore, self).__init__(config)

        backends = odict()
        for order, backend_def in enumerate(config_dict["backends"]):
            backends[order] = build_object_store_from_config(config, config_dict=backend_def, fsmon=fsmon)

        self.backends = backends
Example #55
    def __init__(self, trans, history):
        if not history:
            history = trans.get_history()
        self.history = history
        self.warnings = set()
        self.jobs = odict()
        self.implicit_map_jobs = []
        self.collection_types = {}

        self.__summarize()
Example #56
 def __init__(self):
     self.repository_types_by_label = odict()
     self.repository_types_by_label['unrestricted'] = unrestricted.Unrestricted()
     self.repository_types_by_label['repository_suite_definition'] = repository_suite_definition.RepositorySuiteDefinition()
     self.repository_types_by_label['tool_dependency_definition'] = tool_dependency_definition.ToolDependencyDefinition()
Example #57
 def __init__(self, app, xml_filename=None, conf_watchers=None):
     self.app = app
     self.data_managers = odict()
     self.managed_data_tables = odict()
     self.tool_path = None
     self._reload_count = 0
     self.filename = xml_filename or self.app.config.data_manager_config_file
     for filename in util.listify(self.filename):
         if not filename:
             continue
         self.load_from_xml(filename)
     if self.app.config.shed_data_manager_config_file:
         self.load_from_xml(self.app.config.shed_data_manager_config_file,
                            store_tool_path=False,
                            replace_existing=True)
     if conf_watchers:
         self.conf_watchers = conf_watchers
     else:
         self.conf_watchers = self.get_conf_watchers()
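
util.listify is what lets self.filename be either a single path or a comma-separated string of paths; the loop then skips empty entries. A hedged approximation of that helper (the real galaxy.util.listify takes extra options not shown here):

def listify(item):
    # Approximation of galaxy.util.listify: pass lists/tuples through,
    # split comma-separated strings, and map None/empty values to [].
    if not item:
        return []
    if isinstance(item, (list, tuple)):
        return list(item)
    return [part.strip() for part in str(item).split(",")]


print(listify("data_manager_conf.xml, shed_data_manager_conf.xml"))
print(listify(None))  # []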
Example #58
 def __init__(self, config, config_xml=None, fsmon=False):
     super(HierarchicalObjectStore, self).__init__(config, config_xml=config_xml)
     self.backends = odict()
     for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))):
         self.backends[int(b.get('order'))] = build_object_store_from_config(config, fsmon=fsmon, config_xml=b)
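
Sorting the backend elements by their numeric order attribute makes the XML form equivalent to the list-based constructor in the dict-config variant above. A self-contained sketch of that parsing step using xml.etree (element names mirror the snippet; the rest is illustrative):

from collections import OrderedDict
from xml.etree import ElementTree

config_xml = ElementTree.fromstring(
    '<object_store type="hierarchical">'
    '  <backends>'
    '    <backend order="1" type="disk"/>'
    '    <backend order="0" type="disk"/>'
    '  </backends>'
    '</object_store>'
)

backends = OrderedDict()
for b in sorted(config_xml.find('backends'), key=lambda b: int(b.get('order'))):
    backends[int(b.get('order'))] = b.get('type')  # real code builds a store here

print(list(backends.keys()))  # [0, 1] -- document order does not matter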
Example #59
 def load_datatype_converters( self, toolbox, installed_repository_dict=None, deactivate=False ):
     """
     If deactivate is False, add datatype converters from self.converters or self.proprietary_converters
     to the calling app's toolbox.  If deactivate is True, eliminates relevant converters from the calling
     app's toolbox.
     """   
     if installed_repository_dict:
         # Load converters defined by datatypes_conf.xml included in installed tool shed repository.
         converters = self.proprietary_converters
     else:
         # Load converters defined by local datatypes_conf.xml.
         converters = self.converters
     for elem in converters:
         tool_config = elem[0]
         source_datatype = elem[1]
         target_datatype = elem[2]
         if installed_repository_dict:
             converter_path = installed_repository_dict[ 'converter_path' ]
             config_path = os.path.join( converter_path, tool_config )
         else:
             config_path = os.path.join( self.converters_path, tool_config )
         try:
             converter = toolbox.load_tool( config_path )
             if installed_repository_dict:
                 # If the converter is included in an installed tool shed repository, set the tool
                 # shed related tool attributes.
                 converter.tool_shed = installed_repository_dict[ 'tool_shed' ]
                 converter.repository_name = installed_repository_dict[ 'repository_name' ]
                 converter.repository_owner = installed_repository_dict[ 'repository_owner' ]
                 converter.installed_changeset_revision = installed_repository_dict[ 'installed_changeset_revision' ]
                 converter.old_id = converter.id
                 # The converter should be included in the list of tools defined in tool_dicts.
                 tool_dicts = installed_repository_dict[ 'tool_dicts' ]
                 for tool_dict in tool_dicts:
                     if tool_dict[ 'id' ] == converter.id:
                         converter.guid = tool_dict[ 'guid' ]
                         converter.id = tool_dict[ 'guid' ]
                         break
             if deactivate:
                 del toolbox.tools_by_id[ converter.id ]
                 if source_datatype in self.datatype_converters:
                     del self.datatype_converters[ source_datatype ][ target_datatype ]
                 self.log.debug( "Deactivated converter: %s", converter.id )
             else:
                 toolbox.tools_by_id[ converter.id ] = converter
                 if source_datatype not in self.datatype_converters:
                     self.datatype_converters[ source_datatype ] = odict()
                 self.datatype_converters[ source_datatype ][ target_datatype ] = converter
                 self.log.debug( "Loaded converter: %s", converter.id )
         except Exception, e:
             if deactivate:
                 self.log.exception( "Error deactivating converter (%s): %s" % ( config_path, str( e ) ) )
             else:
                 self.log.exception( "Error loading converter (%s): %s" % ( config_path, str( e ) ) )
Example #60
    def __init__(self,
                 job_working_directory,
                 has_collection,
                 dataset_paths=[],
                 **kwargs):
        super(DatasetCollectionWrapper, self).__init__()
        self.job_working_directory = job_working_directory
        self._dataset_elements_cache = {}
        self.dataset_paths = dataset_paths
        self.kwargs = kwargs

        if has_collection is None:
            self.__input_supplied = False
            return
        else:
            self.__input_supplied = True

        if hasattr(has_collection, "name"):
            # It is a HistoryDatasetCollectionAssociation
            collection = has_collection.collection
            self.name = has_collection.name
        elif hasattr(has_collection, "child_collection"):
            # It is a DatasetCollectionElement instance referencing another collection
            collection = has_collection.child_collection
            self.name = has_collection.element_identifier
        else:
            collection = has_collection
            self.name = None
        self.collection = collection

        elements = collection.elements
        element_instances = odict.odict()

        element_instance_list = []
        for dataset_collection_element in elements:
            element_object = dataset_collection_element.element_object
            element_identifier = dataset_collection_element.element_identifier

            if dataset_collection_element.is_collection:
                element_wrapper = DatasetCollectionWrapper(
                    job_working_directory, dataset_collection_element,
                    dataset_paths, **kwargs)
            else:
                element_wrapper = self._dataset_wrapper(
                    element_object,
                    dataset_paths,
                    identifier=element_identifier,
                    **kwargs)

            element_instances[element_identifier] = element_wrapper
            element_instance_list.append(element_wrapper)

        self.__element_instances = element_instances
        self.__element_instance_list = element_instance_list
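
The wrapper recurses into child collections while the odict keeps element identifiers in collection order. A stripped-down sketch of that recursion over plain data, where a nested list stands in for a child collection and any other value for a dataset (all names here are illustrative):

from collections import OrderedDict


def wrap(elements):
    wrapped = OrderedDict()
    for identifier, element in elements:
        if isinstance(element, list):
            # Recurse, as DatasetCollectionWrapper does for sub-collections.
            wrapped[identifier] = wrap(element)
        else:
            wrapped[identifier] = "wrapped(%s)" % element
    return wrapped


collection = [("pair1", [("forward", "r1.fastq"), ("reverse", "r2.fastq")]),
              ("sample2", "s2.fastq")]
print(wrap(collection))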