Example 1
 def get_initial_value(self, trans, context):
     rval = {}
     child_context = ExpressionContext(rval, context)
     for child_input in self.inputs.values():
         rval[child_input.name] = child_input.get_initial_value(
             trans, child_context)
     return rval
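The `ExpressionContext(rval, context)` pattern above recurs in nearly every example that follows: each grouping level wraps its own state dict around the parent context, so child parameters can resolve names from any enclosing scope. A minimal sketch of that chained-lookup behavior (a simplified stand-in for illustration, not Galaxy's actual `galaxy.util.expressions.ExpressionContext`):

class ChainedContext:
    """Dict-like lookup that falls back to the parent context on a miss."""

    def __init__(self, vars, parent=None):
        self.vars = vars        # state dict for the current grouping level
        self.parent = parent    # enclosing context, or None at the root

    def __getitem__(self, key):
        if key in self.vars:
            return self.vars[key]
        if self.parent is not None:
            return self.parent[key]
        raise KeyError(key)

    def __contains__(self, key):
        return key in self.vars or (self.parent is not None and key in self.parent)

outer = ChainedContext({'genome': 'hg19'})
inner = ChainedContext({'window': 100}, parent=outer)
assert inner['genome'] == 'hg19'   # resolved from the enclosing scope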
Example 2
 def get_initial_value(self, trans, context):
     if self.inputs is None:
         raise Exception("Must set 'inputs' attribute to use.")
     rval: Dict[str, Any] = {}
     child_context = ExpressionContext(rval, context)
     for child_input in self.inputs.values():
         rval[child_input.name] = child_input.get_initial_value(
             trans, child_context)
     return rval
Example 3
 def populate_state(trans, inputs, state, incoming, prefix="", context=None):
     errors = dict()
     context = ExpressionContext(state, context)
     for input in inputs.itervalues():
         key = prefix + input.name
         if input.type == 'repeat':
             group_state = state[input.name]
             group_errors = []
             rep_index = 0
             del group_state[:]
             while True:
                 rep_name = "%s_%d" % (key, rep_index)
                 if not any([incoming_key.startswith(rep_name) for incoming_key in incoming.keys()]):
                     break
                 if rep_index < input.max:
                     new_state = {}
                     new_state['__index__'] = rep_index
                     initialize_state(trans, input.inputs, new_state, context)
                     group_state.append(new_state)
                     group_errors.append({})
                     rep_errors = populate_state(trans, input.inputs, new_state, incoming, prefix=rep_name + "|", context=context)
                     if rep_errors:
                         group_errors[rep_index].update( rep_errors )
                 else:
                     group_errors[-1] = { '__index__': 'Cannot add repeat (max size=%i).' % input.max }
                 rep_index += 1
         elif input.type == 'conditional':
             group_state = state[input.name]
             group_prefix = "%s|" % ( key )
             test_param_key = group_prefix + input.test_param.name
             default_value = incoming.get(test_param_key, group_state.get(input.test_param.name, None))
             value, error = check_state(trans, input.test_param, default_value, context)
             if error:
                 errors[input.name] = [error]
             else:
                 current_case = input.get_current_case(value, trans)
                 group_state = state[input.name] = {}
                 initialize_state(trans, input.cases[current_case].inputs, group_state, context)
                 group_errors = populate_state( trans, input.cases[current_case].inputs, group_state, incoming, prefix=group_prefix, context=context)
                 if group_errors:
                     errors[input.name] = group_errors
                 group_state['__current_case__'] = current_case
             group_state[input.test_param.name] = value
         else:
             default_value = incoming.get(key, state.get(input.name, None))
             value, error = check_state(trans, input, default_value, context)
             if error:
                 errors[input.name] = error
             state[input.name] = value
     return errors
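populate_state matches incoming form keys against hierarchical prefixes: each repeat instance contributes `name_index|` to the prefix and each conditional contributes `name|`. A small illustration of the key scheme, using hypothetical parameter names:

# Hypothetical layout: a repeat 'queries' containing a conditional 'source'
# whose test parameter is 'source_type'; flat form keys look like this.
incoming = {
    'queries_0|source|source_type': 'file',  # repeat instance 0, test param
    'queries_0|source|path': '/data/a.txt',  # child of the selected case
    'queries_1|source|source_type': 'url',   # repeat instance 1
}
# The loop above derives the matching prefixes step by step:
#   repeat instance 0  -> rep_name = 'queries_0', child prefix 'queries_0|'
#   nested conditional -> group_prefix = 'queries_0|source|'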
Example 4
def new_state(trans, tool, invalid=False):
    """Create a new `DefaultToolState` for the received tool.  Only inputs on the first page will be initialized."""
    state = galaxy.tools.DefaultToolState()
    state.inputs = {}
    if invalid:
        # We're attempting to display a tool in the tool shed that has been determined to have errors, so is invalid.
        return state
    inputs = tool.inputs_by_page[0]
    context = ExpressionContext(state.inputs, parent=None)
    for input in inputs.itervalues():
        try:
            state.inputs[input.name] = input.get_initial_value(trans, context)
        except Exception:
            state.inputs[input.name] = []
    return state
Example 5
 def get_initial_value(self, trans, context):
     # State for a conditional is a plain dictionary.
     rval = {}
     # Get the default value for the 'test element' and use it
     # to determine the current case
     test_value = self.test_param.get_initial_value(trans, context)
     current_case = self.get_current_case(test_value)
     # Store the current case in a special value
     rval['__current_case__'] = current_case
     # Store the value of the test element
     rval[self.test_param.name] = test_value
     # Fill in state for selected case
     child_context = ExpressionContext(rval, context)
     for child_input in self.cases[current_case].inputs.values():
         rval[child_input.name] = child_input.get_initial_value(trans, child_context)
     return rval
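The state a conditional produces is therefore a flat dict holding the reserved `__current_case__` marker, the test parameter's value, and the initial values of the selected case's inputs. For a hypothetical conditional whose test parameter `use_filter` selects case 0 with one child input:

# Hypothetical result of Conditional.get_initial_value
rval = {
    '__current_case__': 0,   # index into self.cases, derived from the test value
    'use_filter': 'true',    # value of the test parameter itself
    'min_length': '0',       # initial value for the single input of case 0
}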
Example 6
        def iterate(group_inputs, inputs, tool_state, errors, other_values=None):
            other_values = ExpressionContext( tool_state, other_values )
            for input_index, input in enumerate( inputs.itervalues() ):
                # create model dictionary
                group_inputs[input_index] = input.to_dict(trans)
                if group_inputs[input_index] is None:
                    continue

                # identify state for subsection/group
                group_state = tool_state[input.name]

                # iterate and update values
                if input.type == 'repeat':
                    group_cache = group_inputs[input_index]['cache'] = {}
                    for i in range( len( group_state ) ):
                        group_cache[i] = {}
                        group_errors = errors[input.name][i] if input.name in errors else dict()
                        iterate( group_cache[i], input.inputs, group_state[i], group_errors, other_values )
                elif input.type == 'conditional':
                    try:
                        test_param = group_inputs[input_index]['test_param']
                        test_param['value'] = convert(group_state[test_param['name']])
                    except Exception:
                        pass
                    i = group_state['__current_case__']
                    group_errors = errors.get( input.name, {} )
                    iterate(group_inputs[input_index]['cases'][i]['inputs'], input.cases[i].inputs, group_state, group_errors, other_values)
                else:
                    # create input dictionary, try to pass other_values if to_dict function supports it e.g. dynamic options
                    try:
                        group_inputs[input_index] = input.to_dict(trans, other_values=other_values)
                    except Exception:
                        pass

                    # update input value from tool state
                    try:
                        group_inputs[input_index]['value'] = tool_state[group_inputs[input_index]['name']]
                    except Exception:
                        pass

                    # sanitize if value exists
                    if group_inputs[input_index]['value']:
                        group_inputs[input_index] = sanitize(group_inputs[input_index])
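For repeats, the loop above attaches a 'cache' dictionary to the input's model dict, keyed by repeat instance index and filled recursively, so the client receives one rendered input set per instance. A hypothetical shape of the resulting structure for a repeat named 'queries' with two instances (keys and values are illustrative, not the exact `to_dict` output):

# Hypothetical group_inputs after iterate() visits a two-instance repeat;
# keys of 'cache' are instance indices, values are nested model dicts.
group_inputs = {
    0: {
        'type': 'repeat',
        'name': 'queries',
        'cache': {
            0: {0: {'type': 'text', 'name': 'path', 'value': '/data/a.txt'}},
            1: {0: {'type': 'text', 'name': 'path', 'value': '/data/b.txt'}},
        },
    },
}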
Example 7
def new_state(trans, tool, invalid=False):
    """Create a new `DefaultToolState` for the received tool.  Only inputs on the first page will be initialized."""
    state = galaxy.tools.DefaultToolState()
    state.inputs = {}
    if invalid:
        # We're attempting to display a tool in the tool shed that has been determined to have errors, so is invalid.
        return state
    try:
        # Attempt to generate the tool state using the standard Galaxy-side code
        return tool.new_state(trans)
    except Exception as e:
        # Fall back to building tool state as below
        log.debug('Failed to build tool state for tool "%s" using standard method, will try to fall back on custom method: %s', tool.id, e)
    inputs = tool.inputs_by_page[0]
    context = ExpressionContext(state.inputs, parent=None)
    for input in inputs.values():
        try:
            state.inputs[input.name] = input.get_initial_value(trans, context)
        except Exception:
            # FIXME: not all values should be an empty list
            state.inputs[input.name] = []
    return state
Example 8
    def finish( self, stdout, stderr ):
        """
        Called to indicate that the associated command has been run. Updates
        the output datasets based on stderr and stdout from the command, and
        the contents of the output files.
        """
        # default post job setup
        self.sa_session.expunge_all()
        job = self.get_job()

        try:
            self.reclaim_ownership()
        except Exception:
            self.fail( job.info )
            log.exception( '(%s) Failed to change ownership of %s, failing' % ( job.id, self.working_directory ) )

        # if the job was deleted, don't finish it
        if job.state == job.states.DELETED or job.state == job.states.ERROR:
            #ERROR at this point means the job was deleted by an administrator.
            return self.fail( job.info )
        if stderr:
            job.state = job.states.ERROR
        else:
            job.state = job.states.OK
        if self.version_string_cmd:
            version_filename = self.get_version_string_path()
            if os.path.exists(version_filename):
                self.version_string = open(version_filename).read()
                os.unlink(version_filename)

        if self.app.config.outputs_to_working_directory and not self.__link_file_check():
            for dataset_path in self.get_output_fnames():
                try:
                    shutil.move( dataset_path.false_path, dataset_path.real_path )
                    log.debug( "finish(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
                except ( IOError, OSError ):
                    # this can happen if Galaxy is restarted during the job's
                    # finish method - the false_path file has already moved,
                    # and when the job is recovered, it won't be found.
                    if os.path.exists( dataset_path.real_path ) and os.stat( dataset_path.real_path ).st_size > 0:
                        log.warning( "finish(): %s not found, but %s is not empty, so it will be used instead" % ( dataset_path.false_path, dataset_path.real_path ) )
                    else:
                        return self.fail( "Job %s's output dataset(s) could not be read" % job.id )
        job_context = ExpressionContext( dict( stdout = stdout, stderr = stderr ) )
        job_tool = self.app.toolbox.tools_by_id.get( job.tool_id, None )
        def in_directory( file, directory ):
            # Make both absolute.
            directory = os.path.abspath( directory )
            file = os.path.abspath( file )

            #Return true, if the common prefix of both is equal to directory
            #e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
            return os.path.commonprefix( [ file, directory ] ) == directory
        for dataset_assoc in job.output_datasets + job.output_library_datasets:
            context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
            #should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
            for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: #need to update all associated output hdas, i.e. history was shared with job running
                #
                # If HDA is to be copied from the working directory, do it now so that other attributes are correctly set.
                #
                if isinstance( dataset, model.HistoryDatasetAssociation ):
                    joda = self.sa_session.query( model.JobToOutputDatasetAssociation ).filter_by( job=job, dataset=dataset ).first()
                    if joda and job_tool:
                        hda_tool_output = job_tool.outputs.get( joda.name, None )
                        if hda_tool_output and hda_tool_output.from_work_dir:
                            # Copy from working dir to HDA.
                            source_file = os.path.join( os.path.abspath( self.working_directory ), hda_tool_output.from_work_dir )
                            if in_directory( source_file, self.working_directory ):
                                try:
                                    shutil.move( source_file, dataset.file_name )
                                    log.debug( "finish(): Moved %s to %s as directed by from_work_dir" % ( source_file, dataset.file_name ) )
                                except ( IOError, OSError ):
                                    log.debug( "finish(): Could not move %s to %s as directed by from_work_dir" % ( source_file, dataset.file_name ) )
                            else:
                                # Security violation.
                                log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, self.working_directory ) )

                dataset.blurb = 'done'
                dataset.peek  = 'no peek'
                dataset.info = ( dataset.info  or '' ) + context['stdout'] + context['stderr']
                dataset.tool_version = self.version_string
                dataset.set_size()
                # Update (non-library) job output datasets through the object store
                if dataset not in job.output_library_datasets:
                    self.app.object_store.update_from_file(dataset.dataset, create=True)
                if context['stderr']:
                    dataset.blurb = "error"
                elif dataset.has_data():
                    # If the tool was expected to set the extension, attempt to retrieve it
                    if dataset.ext == 'auto':
                        dataset.extension = context.get( 'ext', 'data' )
                        dataset.init_meta( copy_from=dataset )
                    #if a dataset was copied, it won't appear in our dictionary:
                    #either use the metadata from originating output dataset, or call set_meta on the copies
                    #it would be quicker to just copy the metadata from the originating output dataset,
                    #but somewhat trickier (need to recurse up the copied_from tree), for now we'll call set_meta()
                    if not self.app.config.set_metadata_externally or \
                     ( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) \
                       and self.app.config.retry_metadata_internally ):
                        dataset.set_meta( overwrite = False )
                    elif not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and not context['stderr']:
                        dataset._state = model.Dataset.states.FAILED_METADATA
                    else:
                        #load metadata from file
                        #we need to no longer allow metadata to be edited while the job is still running,
                        #since if it is edited, the metadata changed on the running output will no longer match
                        #the metadata that was stored to disk for use via the external process,
                        #and the changes made by the user will be lost, without warning or notice
                        dataset.metadata.from_JSON_dict( self.external_output_metadata.get_output_filenames_by_dataset( dataset, self.sa_session ).filename_out )
                    try:
                        assert context.get( 'line_count', None ) is not None
                        if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
                            dataset.set_peek( line_count=context['line_count'], is_multi_byte=True )
                        else:
                            dataset.set_peek( line_count=context['line_count'] )
                    except Exception:
                        if ( not dataset.datatype.composite_type and dataset.dataset.is_multi_byte() ) or self.tool.is_multi_byte:
                            dataset.set_peek( is_multi_byte=True )
                        else:
                            dataset.set_peek()
                    try:
                        # set the name if provided by the tool
                        dataset.name = context['name']
                    except Exception:
                        pass
                else:
                    dataset.blurb = "empty"
                    if dataset.ext == 'auto':
                        dataset.extension = 'txt'
                self.sa_session.add( dataset )
            if context['stderr']:
                dataset_assoc.dataset.dataset.state = model.Dataset.states.ERROR
            else:
                dataset_assoc.dataset.dataset.state = model.Dataset.states.OK
            # If any of the rest of the finish method below raises an
            # exception, the fail method will run and set the datasets to
            # ERROR.  The user will never see that the datasets are in error if
            # they were flushed as OK here, since upon doing so, the history
            # panel stops checking for updates.  So allow the
            # self.sa_session.flush() at the bottom of this method set
            # the state instead.

        for pja in job.post_job_actions:
            ActionBox.execute(self.app, self.sa_session, pja.post_job_action, job)
        # Flush all the dataset and job changes above.  Dataset state changes
        # will now be seen by the user.
        self.sa_session.flush()
        # Save stdout and stderr
        if len( stdout ) > 32768:
            log.error( "stdout for job %d is greater than 32K, only first part will be logged to database" % job.id )
        job.stdout = stdout[:32768]
        if len( stderr ) > 32768:
            log.error( "stderr for job %d is greater than 32K, only first part will be logged to database" % job.id )
        job.stderr = stderr[:32768]
        # custom post process setup
        inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
        out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
        inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
        out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
        param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
        param_dict = self.tool.params_from_strings( param_dict, self.app )
        # Check for and move associated_files
        self.tool.collect_associated_files(out_data, self.working_directory)
        gitd = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
        if gitd:
            self.tool.collect_associated_files({'' : gitd}, self.working_directory)
        # Create generated output children and primary datasets and add to param_dict
        collected_datasets = {'children':self.tool.collect_child_datasets(out_data, self.working_directory),'primary':self.tool.collect_primary_datasets(out_data, self.working_directory)}
        param_dict.update({'__collected_datasets__':collected_datasets})
        # Certain tools require tasks to be completed after job execution
        # ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ).
        self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job = job )
        # Call 'exec_after_process' hook
        self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
                             out_data=out_data, param_dict=param_dict,
                             tool=self.tool, stdout=stdout, stderr=stderr )
        job.command_line = self.command_line

        bytes = 0
        # Once datasets are collected, set the total dataset size (includes extra files)
        for dataset_assoc in job.output_datasets:
            dataset_assoc.dataset.dataset.set_total_size()
            bytes += dataset_assoc.dataset.dataset.get_total_size()

        if job.user:
            job.user.total_disk_usage += bytes

        # fix permissions
        for path in [ dp.real_path for dp in self.get_output_fnames() ]:
            util.umask_fix_perms( path, self.app.config.umask, 0o666, self.app.config.gid )
        self.sa_session.flush()
        log.debug( 'job %d ended' % self.job_id )
        if self.app.config.cleanup_job == 'always' or ( not stderr and self.app.config.cleanup_job == 'onsuccess' ):
            self.cleanup()
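One detail worth flagging in finish() above: `in_directory` relies on `os.path.commonprefix`, which compares strings rather than path components, so `/work/dir2/file` shares the prefix `/work/dir` with the directory `/work/dir` and would wrongly count as inside it. A stricter containment check (a sketch, not the code used here) compares resolved path components instead:

import os

def in_directory_strict(file_path, directory):
    """Component-wise containment check; realpath also resolves symlinks."""
    directory = os.path.realpath(directory)
    file_path = os.path.realpath(file_path)
    # relpath starts with '..' exactly when file_path lies outside directory
    return not os.path.relpath(file_path, directory).startswith(os.pardir)

assert not in_directory_strict('/work/dir2/file', '/work/dir')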
Example 9
def populate_state(request_context,
                   inputs,
                   incoming,
                   state,
                   errors={},
                   prefix='',
                   context=None,
                   check=True):
    """
    Populates nested state dict from incoming parameter values.
    >>> from xml.etree.ElementTree import XML
    >>> from galaxy.util.bunch import Bunch
    >>> from galaxy.util.odict import odict
    >>> from galaxy.tools.parameters.basic import TextToolParameter, BooleanToolParameter
    >>> from galaxy.tools.parameters.grouping import Conditional, Repeat
    >>> trans = Bunch( workflow_building_mode=False )
    >>> a = TextToolParameter( None, XML( '<param name="a"/>' ) )
    >>> b = Repeat()
    >>> b.min = 0
    >>> b.max = 1
    >>> c = TextToolParameter( None, XML( '<param name="c"/>' ) )
    >>> d = Repeat()
    >>> d.min = 0
    >>> d.max = 1
    >>> e = TextToolParameter( None, XML( '<param name="e"/>' ) )
    >>> f = Conditional()
    >>> g = BooleanToolParameter( None, XML( '<param name="g"/>' ) )
    >>> h = TextToolParameter( None, XML( '<param name="h"/>' ) )
    >>> i = TextToolParameter( None, XML( '<param name="i"/>' ) )
    >>> b.name = 'b'
    >>> b.inputs = odict([ ('c', c), ('d', d) ])
    >>> d.name = 'd'
    >>> d.inputs = odict([ ('e', e), ('f', f) ])
    >>> f.test_param = g
    >>> f.name = 'f'
    >>> f.cases = [ Bunch( value='true', inputs= { 'h': h } ), Bunch( value='false', inputs= { 'i': i } ) ]
    >>> inputs = odict([('a',a),('b',b)])
    >>> flat = odict([ ('a', 1 ), ( 'b_0|c', 2 ), ( 'b_0|d_0|e', 3 ), ( 'b_0|d_0|f|h', 4 ), ( 'b_0|d_0|f|g', True ) ])
    >>> state = odict()
    >>> populate_state( trans, inputs, flat, state, check=False )
    >>> print state[ 'a' ]
    1
    >>> print state[ 'b' ][ 0 ][ 'c' ]
    2
    >>> print state[ 'b' ][ 0 ][ 'd' ][ 0 ][ 'e' ]
    3
    >>> print state[ 'b' ][ 0 ][ 'd' ][ 0 ][ 'f' ][ 'h' ]
    4
    """
    context = ExpressionContext(state, context)
    for input in inputs.values():
        state[input.name] = input.get_initial_value(request_context, context)
        key = prefix + input.name
        group_state = state[input.name]
        group_prefix = '%s|' % (key)
        if input.type == 'repeat':
            rep_index = 0
            del group_state[:]
            while True:
                rep_prefix = '%s_%d' % (key, rep_index)
                if not any(
                        incoming_key.startswith(rep_prefix) for incoming_key in
                        incoming.keys()) and rep_index >= input.min:
                    break
                if rep_index < input.max:
                    new_state = {'__index__': rep_index}
                    group_state.append(new_state)
                    populate_state(request_context,
                                   input.inputs,
                                   incoming,
                                   new_state,
                                   errors,
                                   prefix=rep_prefix + '|',
                                   context=context,
                                   check=check)
                rep_index += 1
        elif input.type == 'conditional':
            if input.value_ref and not input.value_ref_in_group:
                test_param_key = prefix + input.test_param.name
            else:
                test_param_key = group_prefix + input.test_param.name
            test_param_value = incoming.get(
                test_param_key, group_state.get(input.test_param.name))
            value, error = check_param(
                request_context, input.test_param, test_param_value,
                context) if check else [test_param_value, None]
            if error:
                errors[test_param_key] = error
            else:
                try:
                    current_case = input.get_current_case(value)
                    group_state = state[input.name] = {}
                    populate_state(request_context,
                                   input.cases[current_case].inputs,
                                   incoming,
                                   group_state,
                                   errors,
                                   prefix=group_prefix,
                                   context=context,
                                   check=check)
                    group_state['__current_case__'] = current_case
                except Exception:
                    errors[
                        test_param_key] = 'The selected case is unavailable/invalid.'
            group_state[input.test_param.name] = value
        elif input.type == 'section':
            populate_state(request_context,
                           input.inputs,
                           incoming,
                           group_state,
                           errors,
                           prefix=group_prefix,
                           context=context,
                           check=check)
        elif input.type == 'upload_dataset':
            d_type = input.get_datatype(request_context, context=context)
            writable_files = d_type.writable_files
            while len(group_state) > len(writable_files):
                del group_state[-1]
            while len(writable_files) > len(group_state):
                new_state = {'__index__': len(group_state)}
                for upload_item in input.inputs.values():
                    new_state[
                        upload_item.name] = upload_item.get_initial_value(
                            request_context, context)
                group_state.append(new_state)
            for i, rep_state in enumerate(group_state):
                rep_index = rep_state['__index__']
                rep_prefix = '%s_%d|' % (key, rep_index)
                populate_state(request_context,
                               input.inputs,
                               incoming,
                               rep_state,
                               errors,
                               prefix=rep_prefix,
                               context=context,
                               check=check)
        else:
            param_value = _get_incoming_value(incoming, key,
                                              state.get(input.name))
            value, error = check_param(
                request_context, input, param_value,
                context) if check else [param_value, None]
            if error:
                errors[key] = error
            state[input.name] = value
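Note the `errors={}` default in the signature above: Python evaluates default arguments once at definition time, so errors collected by one top-level call leak into the next whenever the caller omits the argument. A short demonstration of the pitfall, followed by the usual None-sentinel fix:

def collect(errors={}):          # shared dict, created once at definition
    errors['x'] = 'bad value'
    return errors

assert collect() is collect()    # both calls return the very same dict

def collect_fixed(errors=None):  # fresh dict per call unless one is passed in
    errors = {} if errors is None else errors
    errors['x'] = 'bad value'
    return errors

assert collect_fixed() is not collect_fixed()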
Example 10
def render_do_inputs(context,
                     inputs,
                     tool_state,
                     errors,
                     prefix,
                     other_values=None):
    context.caller_stack._push_frame()
    try:

        def row_for_param(prefix, param, parent_state, parent_errors,
                          other_values):
            return render_row_for_param(context, prefix, param, parent_state,
                                        parent_errors, other_values)

        def do_inputs(inputs, tool_state, errors, prefix, other_values=None):
            return render_do_inputs(context, inputs, tool_state, errors,
                                    prefix, other_values)

        len = context.get('len', UNDEFINED)
        range = context.get('range', UNDEFINED)
        dict = context.get('dict', UNDEFINED)
        str = context.get('str', UNDEFINED)
        enumerate = context.get('enumerate', UNDEFINED)
        trans = context.get('trans', UNDEFINED)
        ExpressionContext = context.get('ExpressionContext', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 77
        __M_writer(u'\n      ')
        # SOURCE LINE 78
        other_values = ExpressionContext(tool_state, other_values)

        __M_writer(u'\n')
        # SOURCE LINE 79
        for input_index, input in enumerate(inputs.itervalues()):
            # SOURCE LINE 80
            if input.type == "repeat":
                # SOURCE LINE 81
                __M_writer(
                    u'              <div class="repeat-group">\n                  <div class="form-title-row"><b>'
                )
                # SOURCE LINE 82
                __M_writer(unicode(input.title_plural))
                __M_writer(u'</b></div>\n                  ')
                # SOURCE LINE 83
                repeat_state = tool_state[input.name]

                __M_writer(u'\n')
                # SOURCE LINE 84
                for i in range(len(repeat_state)):
                    # SOURCE LINE 85
                    __M_writer(
                        u'                    <div class="repeat-group-item">\n                    '
                    )
                    # SOURCE LINE 86

                    if input.name in errors:
                        rep_errors = errors[input.name][i]
                    else:
                        rep_errors = dict()
                    index = repeat_state[i]['__index__']

                    # SOURCE LINE 92
                    __M_writer(
                        u'\n                    <div class="form-title-row"><b>'
                    )
                    # SOURCE LINE 93
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'</b></div>\n                    ')
                    # SOURCE LINE 94
                    __M_writer(
                        unicode(
                            do_inputs(
                                input.inputs, repeat_state[i], rep_errors,
                                prefix + input.name + "_" + str(index) + "|",
                                other_values)))
                    __M_writer(
                        u'\n                    <div class="form-row"><input type="submit" name="'
                    )
                    # SOURCE LINE 95
                    __M_writer(unicode(prefix))
                    __M_writer(unicode(input.name))
                    __M_writer(u'_')
                    __M_writer(unicode(index))
                    __M_writer(u'_remove" value="Remove ')
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'"></div>\n                    </div>\n')
                # SOURCE LINE 98
                __M_writer(
                    u'                  <div class="form-row"><input type="submit" name="'
                )
                __M_writer(unicode(prefix))
                __M_writer(unicode(input.name))
                __M_writer(u'_add" value="Add new ')
                __M_writer(unicode(input.title))
                __M_writer(u'"></div>\n              </div>\n')
                # SOURCE LINE 100
            elif input.type == "conditional":
                # SOURCE LINE 101
                __M_writer(u'                ')

                group_state = tool_state[input.name]
                group_errors = errors.get(input.name, {})
                current_case = group_state['__current_case__']
                group_prefix = prefix + input.name + "|"

                # SOURCE LINE 106
                __M_writer(u'\n')
                # SOURCE LINE 107
                if input.value_ref_in_group:
                    # SOURCE LINE 108
                    __M_writer(u'                    ')
                    __M_writer(
                        unicode(
                            row_for_param(group_prefix, input.test_param,
                                          group_state, group_errors,
                                          other_values)))
                    __M_writer(u'\n')
                # SOURCE LINE 110
                __M_writer(u'                ')
                __M_writer(
                    unicode(
                        do_inputs(input.cases[current_case].inputs,
                                  group_state, group_errors, group_prefix,
                                  other_values)))
                __M_writer(u'\n')
                # SOURCE LINE 111
            elif input.type == "upload_dataset":
                # SOURCE LINE 112
                if input.get_datatype(
                        trans, other_values
                ).composite_type is None:  #have non-composite upload appear as before
                    # SOURCE LINE 113
                    __M_writer(u'                    ')

                    if input.name in errors:
                        rep_errors = errors[input.name][0]
                    else:
                        rep_errors = dict()

                    # SOURCE LINE 118
                    __M_writer(u'\n                  ')
                    # SOURCE LINE 119
                    __M_writer(
                        unicode(
                            do_inputs(input.inputs, tool_state[input.name][0],
                                      rep_errors,
                                      prefix + input.name + "_" + str(0) + "|",
                                      other_values)))
                    __M_writer(u'\n')
                    # SOURCE LINE 120
                else:
                    # SOURCE LINE 121
                    __M_writer(
                        u'                    <div class="repeat-group">\n                        <div class="form-title-row"><b>'
                    )
                    # SOURCE LINE 122
                    __M_writer(unicode(input.group_title(other_values)))
                    __M_writer(u'</b></div>\n                        ')
                    # SOURCE LINE 123

                    repeat_state = tool_state[input.name]

                    # SOURCE LINE 125
                    __M_writer(u'\n')
                    # SOURCE LINE 126
                    for i in range(len(repeat_state)):
                        # SOURCE LINE 127
                        __M_writer(
                            u'                          <div class="repeat-group-item">\n                          '
                        )
                        # SOURCE LINE 128

                        if input.name in errors:
                            rep_errors = errors[input.name][i]
                        else:
                            rep_errors = dict()
                        index = repeat_state[i]['__index__']

                        # SOURCE LINE 134
                        __M_writer(
                            u'\n                          <div class="form-title-row"><b>File Contents for '
                        )
                        # SOURCE LINE 135
                        __M_writer(
                            unicode(
                                input.title_by_index(trans, i, other_values)))
                        __M_writer(u'</b></div>\n                          ')
                        # SOURCE LINE 136
                        __M_writer(
                            unicode(
                                do_inputs(
                                    input.inputs, repeat_state[i], rep_errors,
                                    prefix + input.name + "_" + str(index) +
                                    "|", other_values)))
                        __M_writer(u'\n')
                        # SOURCE LINE 138
                        __M_writer(u'                          </div>\n')
                    # SOURCE LINE 141
                    __M_writer(u'                    </div>\n')
                # SOURCE LINE 143
            else:
                # SOURCE LINE 144
                __M_writer(u'                ')
                __M_writer(
                    unicode(
                        row_for_param(prefix, input, tool_state, errors,
                                      other_values)))
                __M_writer(u'\n')
        # SOURCE LINE 147
        __M_writer(u'    ')
        return ''
    finally:
        context.caller_stack._pop_frame()
Example 11
def _populate_state_legacy(request_context,
                           inputs,
                           incoming,
                           state,
                           errors,
                           prefix='',
                           context=None,
                           check=True,
                           simple_errors=True):
    context = ExpressionContext(state, context)
    for input in inputs.values():
        state[input.name] = input.get_initial_value(request_context, context)
        key = prefix + input.name
        group_state = state[input.name]
        group_prefix = '%s|' % (key)
        if input.type == 'repeat':
            rep_index = 0
            del group_state[:]
            while True:
                rep_prefix = '%s_%d' % (key, rep_index)
                if not any(
                        incoming_key.startswith(rep_prefix) for incoming_key in
                        incoming.keys()) and rep_index >= input.min:
                    break
                if rep_index < input.max:
                    new_state = {'__index__': rep_index}
                    group_state.append(new_state)
                    _populate_state_legacy(request_context,
                                           input.inputs,
                                           incoming,
                                           new_state,
                                           errors,
                                           prefix=rep_prefix + '|',
                                           context=context,
                                           check=check,
                                           simple_errors=simple_errors)
                rep_index += 1
        elif input.type == 'conditional':
            if input.value_ref and not input.value_ref_in_group:
                test_param_key = prefix + input.test_param.name
            else:
                test_param_key = group_prefix + input.test_param.name
            test_param_value = incoming.get(
                test_param_key, group_state.get(input.test_param.name))
            value, error = check_param(
                request_context,
                input.test_param,
                test_param_value,
                context,
                simple_errors=simple_errors) if check else [
                    test_param_value, None
                ]
            if error:
                errors[test_param_key] = error
            else:
                try:
                    current_case = input.get_current_case(value)
                    group_state = state[input.name] = {}
                    _populate_state_legacy(request_context,
                                           input.cases[current_case].inputs,
                                           incoming,
                                           group_state,
                                           errors,
                                           prefix=group_prefix,
                                           context=context,
                                           check=check,
                                           simple_errors=simple_errors)
                    group_state['__current_case__'] = current_case
                except Exception:
                    errors[
                        test_param_key] = 'The selected case is unavailable/invalid.'
            group_state[input.test_param.name] = value
        elif input.type == 'section':
            _populate_state_legacy(request_context,
                                   input.inputs,
                                   incoming,
                                   group_state,
                                   errors,
                                   prefix=group_prefix,
                                   context=context,
                                   check=check,
                                   simple_errors=simple_errors)
        elif input.type == 'upload_dataset':
            file_count = input.get_file_count(request_context, context)
            while len(group_state) > file_count:
                del group_state[-1]
            while file_count > len(group_state):
                new_state = {'__index__': len(group_state)}
                for upload_item in input.inputs.values():
                    new_state[
                        upload_item.name] = upload_item.get_initial_value(
                            request_context, context)
                group_state.append(new_state)
            for rep_state in group_state:
                rep_index = rep_state['__index__']
                rep_prefix = '%s_%d|' % (key, rep_index)
                _populate_state_legacy(request_context,
                                       input.inputs,
                                       incoming,
                                       rep_state,
                                       errors,
                                       prefix=rep_prefix,
                                       context=context,
                                       check=check,
                                       simple_errors=simple_errors)
        else:
            param_value = _get_incoming_value(incoming, key,
                                              state.get(input.name))
            value, error = check_param(
                request_context,
                input,
                param_value,
                context,
                simple_errors=simple_errors) if check else [param_value, None]
            if error:
                errors[key] = error
            state[input.name] = value
Example 12
def visit_input_values(inputs,
                       input_values,
                       callback,
                       name_prefix='',
                       label_prefix='',
                       parent_prefix='',
                       context=None,
                       no_replacement_value=REPLACE_ON_TRUTHY):
    """
    Given a tools parameter definition (`inputs`) and a specific set of
    parameter `values`, call `callback` for each non-grouping parameter,
    passing the parameter object, value, a constructed unique name,
    and a display label.

    If the callback returns a value, it will replace the old value.

    >>> from xml.etree.ElementTree import XML
    >>> from galaxy.util.bunch import Bunch
    >>> from galaxy.util.odict import odict
    >>> from galaxy.tools.parameters.basic import TextToolParameter, BooleanToolParameter
    >>> from galaxy.tools.parameters.grouping import Conditional, Repeat
    >>> a = TextToolParameter( None, XML( '<param name="a"/>' ) )
    >>> b = Repeat()
    >>> c = TextToolParameter( None, XML( '<param name="c"/>' ) )
    >>> d = Repeat()
    >>> e = TextToolParameter( None, XML( '<param name="e"/>' ) )
    >>> f = Conditional()
    >>> g = BooleanToolParameter( None, XML( '<param name="g"/>' ) )
    >>> h = TextToolParameter( None, XML( '<param name="h"/>' ) )
    >>> i = TextToolParameter( None, XML( '<param name="i"/>' ) )
    >>> j = TextToolParameter( None, XML( '<param name="j"/>' ) )
    >>> b.name = b.title = 'b'
    >>> b.inputs = odict([ ('c', c), ('d', d) ])
    >>> d.name = d.title = 'd'
    >>> d.inputs = odict([ ('e', e), ('f', f) ])
    >>> f.test_param = g
    >>> f.name = 'f'
    >>> f.cases = [ Bunch( value='true', inputs= { 'h': h } ), Bunch( value='false', inputs= { 'i': i } ) ]
    >>>
    >>> def visitor( input, value, prefix, prefixed_name, prefixed_label, error, **kwargs ):
    ...     print 'name=%s, prefix=%s, prefixed_name=%s, prefixed_label=%s,value=%s' % ( input.name, prefix, prefixed_name, prefixed_label, value )
    ...     if error:
    ...         print error
    >>> inputs = odict([('a', a),('b', b)])
    >>> nested = odict([ ('a', 1), ('b', [ odict([('c', 3), ( 'd', [odict([ ('e', 5), ('f', odict([ ('g', True), ('h', 7) ])) ]) ])]) ]) ])
    >>> visit_input_values( inputs, nested, visitor )
    name=a, prefix=, prefixed_name=a, prefixed_label=a,value=1
    name=c, prefix=b_0|, prefixed_name=b_0|c, prefixed_label=b 1 > c,value=3
    name=e, prefix=b_0|d_0|, prefixed_name=b_0|d_0|e, prefixed_label=b 1 > d 1 > e,value=5
    name=g, prefix=b_0|d_0|, prefixed_name=b_0|d_0|f|g, prefixed_label=b 1 > d 1 > g,value=True
    name=h, prefix=b_0|d_0|, prefixed_name=b_0|d_0|f|h, prefixed_label=b 1 > d 1 > h,value=7
    >>> params_from_strings( inputs, params_to_strings( inputs, nested, None ), None )[ 'b' ][ 0 ][ 'd' ][ 0 ][ 'f' ][ 'g' ] is True
    True

    >>> # Conditional test parameter value does not match any case, warning is shown and child values are not visited
    >>> f.test_param = j
    >>> nested['b'][0]['d'][0]['f']['j'] = 'j'
    >>> visit_input_values( inputs, nested, visitor )
    name=a, prefix=, prefixed_name=a, prefixed_label=a,value=1
    name=c, prefix=b_0|, prefixed_name=b_0|c, prefixed_label=b 1 > c,value=3
    name=e, prefix=b_0|d_0|, prefixed_name=b_0|d_0|e, prefixed_label=b 1 > d 1 > e,value=5
    name=j, prefix=b_0|d_0|, prefixed_name=b_0|d_0|f|j, prefixed_label=b 1 > d 1 > j,value=j
    The selected case is unavailable/invalid.

    >>> # Test parameter missing in state, value error
    >>> del nested['b'][0]['d'][0]['f']['j']
    >>> visit_input_values( inputs, nested, visitor )
    name=a, prefix=, prefixed_name=a, prefixed_label=a,value=1
    name=c, prefix=b_0|, prefixed_name=b_0|c, prefixed_label=b 1 > c,value=3
    name=e, prefix=b_0|d_0|, prefixed_name=b_0|d_0|e, prefixed_label=b 1 > d 1 > e,value=5
    name=j, prefix=b_0|d_0|, prefixed_name=b_0|d_0|f|j, prefixed_label=b 1 > d 1 > j,value=None
    No value found for 'b 1 > d 1 > j'.

    >>> # Conditional parameter missing in state, value error
    >>> del nested['b'][0]['d'][0]['f']
    >>> visit_input_values( inputs, nested, visitor )
    name=a, prefix=, prefixed_name=a, prefixed_label=a,value=1
    name=c, prefix=b_0|, prefixed_name=b_0|c, prefixed_label=b 1 > c,value=3
    name=e, prefix=b_0|d_0|, prefixed_name=b_0|d_0|e, prefixed_label=b 1 > d 1 > e,value=5
    name=j, prefix=b_0|d_0|, prefixed_name=b_0|d_0|f|j, prefixed_label=b 1 > d 1 > j,value=None
    No value found for 'b 1 > d 1 > j'.

    >>> # Conditional input name has changed e.g. due to tool changes, key error
    >>> f.name = 'f_1'
    >>> visit_input_values( inputs, nested, visitor )
    name=a, prefix=, prefixed_name=a, prefixed_label=a,value=1
    name=c, prefix=b_0|, prefixed_name=b_0|c, prefixed_label=b 1 > c,value=3
    name=e, prefix=b_0|d_0|, prefixed_name=b_0|d_0|e, prefixed_label=b 1 > d 1 > e,value=5
    name=j, prefix=b_0|d_0|, prefixed_name=b_0|d_0|f_1|j, prefixed_label=b 1 > d 1 > j,value=None
    No value found for 'b 1 > d 1 > j'.

    >>> # Other parameters are missing in state
    >>> nested = odict([ ('b', [ odict([ ( 'd', [odict([ ('f', odict([ ('g', True), ('h', 7) ])) ]) ])]) ]) ])
    >>> visit_input_values( inputs, nested, visitor )
    name=a, prefix=, prefixed_name=a, prefixed_label=a,value=None
    No value found for 'a'.
    name=c, prefix=b_0|, prefixed_name=b_0|c, prefixed_label=b 1 > c,value=None
    No value found for 'b 1 > c'.
    name=e, prefix=b_0|d_0|, prefixed_name=b_0|d_0|e, prefixed_label=b 1 > d 1 > e,value=None
    No value found for 'b 1 > d 1 > e'.
    name=j, prefix=b_0|d_0|, prefixed_name=b_0|d_0|f_1|j, prefixed_label=b 1 > d 1 > j,value=None
    No value found for 'b 1 > d 1 > j'.
    """
    def callback_helper(input,
                        input_values,
                        name_prefix,
                        label_prefix,
                        parent_prefix,
                        context=None,
                        error=None):
        args = {
            'input': input,
            'parent': input_values,
            'value': input_values.get(input.name),
            'prefixed_name': '%s%s' % (name_prefix, input.name),
            'prefixed_label':
            '%s%s' % (label_prefix, input.label or input.name),
            'prefix': parent_prefix,
            'context': context,
            'error': error
        }
        if input.name not in input_values:
            args['error'] = 'No value found for \'%s\'.' % args.get(
                'prefixed_label')
        new_value = callback(**args)
        if no_replacement_value is REPLACE_ON_TRUTHY:
            replace = bool(new_value)
        else:
            replace = new_value != no_replacement_value
        if replace:
            input_values[input.name] = new_value

    def get_current_case(input, input_values):
        try:
            return input.get_current_case(input_values[input.test_param.name])
        except (KeyError, ValueError):
            return -1

    context = ExpressionContext(input_values, context)
    payload = {
        'context': context,
        'no_replacement_value': no_replacement_value
    }
    for input in inputs.values():
        if isinstance(input, Repeat) or isinstance(input, UploadDataset):
            values = input_values[input.name] = input_values.get(
                input.name, [])
            for i, d in enumerate(values):
                d['__index__'] = i
                new_name_prefix = name_prefix + '%s_%d|' % (input.name, i)
                new_label_prefix = label_prefix + '%s %d > ' % (input.title,
                                                                i + 1)
                visit_input_values(input.inputs,
                                   d,
                                   callback,
                                   new_name_prefix,
                                   new_label_prefix,
                                   parent_prefix=new_name_prefix,
                                   **payload)
        elif isinstance(input, Conditional):
            values = input_values[input.name] = input_values.get(
                input.name, {})
            new_name_prefix = name_prefix + input.name + '|'
            case_error = None if get_current_case(
                input,
                values) >= 0 else 'The selected case is unavailable/invalid.'
            callback_helper(input.test_param,
                            values,
                            new_name_prefix,
                            label_prefix,
                            parent_prefix=name_prefix,
                            context=context,
                            error=case_error)
            values['__current_case__'] = get_current_case(input, values)
            if values['__current_case__'] >= 0:
                visit_input_values(
                    input.cases[values['__current_case__']].inputs,
                    values,
                    callback,
                    new_name_prefix,
                    label_prefix,
                    parent_prefix=name_prefix,
                    **payload)
        elif isinstance(input, Section):
            values = input_values[input.name] = input_values.get(
                input.name, {})
            new_name_prefix = name_prefix + input.name + '|'
            visit_input_values(input.inputs,
                               values,
                               callback,
                               new_name_prefix,
                               label_prefix,
                               parent_prefix=name_prefix,
                               **payload)
        else:
            callback_helper(input,
                            input_values,
                            name_prefix,
                            label_prefix,
                            parent_prefix=parent_prefix,
                            context=context)
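The replacement rule in callback_helper is easy to miss: with the default `no_replacement_value=REPLACE_ON_TRUTHY`, a callback's return value overwrites the stored value only when it is truthy, whereas with an explicit sentinel any return value different from that sentinel is written back. A usage sketch against the doctest setup above (assuming text parameters report `type == 'text'`):

# Upper-case every text value; returning None (falsy) under the default
# REPLACE_ON_TRUTHY policy leaves all other parameters untouched.
def upper_case_text(input, value, **kwargs):
    if input.type == 'text' and isinstance(value, str):
        return value.upper()
    # implicit None -> no replacement under REPLACE_ON_TRUTHY

visit_input_values(inputs, nested, upper_case_text)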
Example 13
def render_do_inputs(context,
                     inputs,
                     tool_state,
                     errors,
                     prefix,
                     other_values=None):
    context.caller_stack._push_frame()
    try:

        def row_for_param(prefix, param, parent_state, parent_errors,
                          other_values):
            return render_row_for_param(context, prefix, param, parent_state,
                                        parent_errors, other_values)

        h = context.get('h', UNDEFINED)

        def do_inputs(inputs, tool_state, errors, prefix, other_values=None):
            return render_do_inputs(context, inputs, tool_state, errors,
                                    prefix, other_values)

        len = context.get('len', UNDEFINED)
        range = context.get('range', UNDEFINED)
        dict = context.get('dict', UNDEFINED)
        str = context.get('str', UNDEFINED)
        enumerate = context.get('enumerate', UNDEFINED)
        trans = context.get('trans', UNDEFINED)
        ExpressionContext = context.get('ExpressionContext', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 80
        __M_writer(u'\n      ')
        # SOURCE LINE 81
        other_values = ExpressionContext(tool_state, other_values)

        __M_writer(u'\n')
        # SOURCE LINE 82
        for input_index, input in enumerate(inputs.itervalues()):
            # SOURCE LINE 83
            if not input.visible:
                # SOURCE LINE 84
                __M_writer(u'                ')
                pass

                __M_writer(u'\n')
                # SOURCE LINE 85
            elif input.type == "repeat":
                # SOURCE LINE 86
                __M_writer(
                    u'              <div class="repeat-group">\n                  <div class="form-title-row"><b>'
                )
                # SOURCE LINE 87
                __M_writer(unicode(input.title_plural))
                __M_writer(u'</b></div>\n                  ')
                # SOURCE LINE 88
                repeat_state = tool_state[input.name]

                __M_writer(u'\n')
                # SOURCE LINE 89
                for i in range(len(repeat_state)):
                    # SOURCE LINE 90
                    __M_writer(
                        u'                    <div class="repeat-group-item">\n                        '
                    )
                    # SOURCE LINE 91

                    if input.name in errors:
                        rep_errors = errors[input.name][i]
                    else:
                        rep_errors = dict()
                    index = repeat_state[i]['__index__']

                    # SOURCE LINE 97
                    __M_writer(
                        u'\n                        <div class="form-title-row"><b>'
                    )
                    # SOURCE LINE 98
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'</b></div>\n                        ')
                    # SOURCE LINE 99
                    __M_writer(
                        unicode(
                            do_inputs(
                                input.inputs, repeat_state[i], rep_errors,
                                prefix + input.name + "_" + str(index) + "|",
                                other_values)))
                    __M_writer(
                        u'\n                        <div class="form-row"><input type="submit" name="'
                    )
                    # SOURCE LINE 100
                    __M_writer(unicode(prefix))
                    __M_writer(unicode(input.name))
                    __M_writer(u'_')
                    __M_writer(unicode(index))
                    __M_writer(u'_remove" value="Remove ')
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'"></div>\n                    </div>\n')
                    # SOURCE LINE 102
                    if '__index__' in rep_errors:
                        # SOURCE LINE 103
                        __M_writer(
                            u'                        <div><img style="vertical-align: middle;" src="'
                        )
                        __M_writer(
                            unicode(
                                h.url_for('/static/style/error_small.png')))
                        __M_writer(
                            u'">&nbsp;<span style="vertical-align: middle;">')
                        __M_writer(unicode(rep_errors['__index__']))
                        __M_writer(u'</span></div>\n')
                        pass
                    pass
                # SOURCE LINE 106
                __M_writer(
                    u'                  <div class="form-row"><input type="submit" name="'
                )
                __M_writer(unicode(prefix))
                __M_writer(unicode(input.name))
                __M_writer(u'_add" value="Add new ')
                __M_writer(unicode(input.title))
                __M_writer(u'"></div>\n              </div>\n')
                # SOURCE LINE 108
            elif input.type == "conditional":
                # SOURCE LINE 109
                __M_writer(u'                ')

                group_state = tool_state[input.name]
                group_errors = errors.get(input.name, {})
                current_case = group_state['__current_case__']
                group_prefix = prefix + input.name + "|"

                # SOURCE LINE 114
                __M_writer(u'\n')
                # SOURCE LINE 115
                if input.value_ref_in_group:
                    # SOURCE LINE 116
                    __M_writer(u'                    ')
                    __M_writer(
                        unicode(
                            row_for_param(group_prefix, input.test_param,
                                          group_state, group_errors,
                                          other_values)))
                    __M_writer(u'\n')
                    pass
                # SOURCE LINE 118
                __M_writer(u'                ')
                __M_writer(
                    unicode(
                        do_inputs(input.cases[current_case].inputs,
                                  group_state, group_errors, group_prefix,
                                  other_values)))
                __M_writer(u'\n')
                # SOURCE LINE 119
            elif input.type == "upload_dataset":
                # SOURCE LINE 120
                if input.get_datatype(trans, other_values).composite_type is None:  # have non-composite upload appear as before
                    # SOURCE LINE 121
                    __M_writer(u'                    ')

                    if input.name in errors:
                        rep_errors = errors[input.name][0]
                    else:
                        rep_errors = dict()

                    # SOURCE LINE 126
                    __M_writer(u'\n                  ')
                    # SOURCE LINE 127
                    __M_writer(
                        unicode(
                            do_inputs(input.inputs, tool_state[input.name][0],
                                      rep_errors,
                                      prefix + input.name + "_" + str(0) + "|",
                                      other_values)))
                    __M_writer(u'\n')
                    # SOURCE LINE 128
                else:
                    # SOURCE LINE 129
                    __M_writer(
                        u'                    <div class="repeat-group">\n                        <div class="form-title-row"><b>'
                    )
                    # SOURCE LINE 130
                    __M_writer(unicode(input.group_title(other_values)))
                    __M_writer(u'</b></div>\n                        ')
                    # SOURCE LINE 131

                    repeat_state = tool_state[input.name]

                    # SOURCE LINE 133
                    __M_writer(u'\n')
                    # SOURCE LINE 134
                    for i in range(len(repeat_state)):
                        # SOURCE LINE 135
                        __M_writer(
                            u'                          <div class="repeat-group-item">\n                          '
                        )
                        # SOURCE LINE 136

                        if input.name in errors:
                            rep_errors = errors[input.name][i]
                        else:
                            rep_errors = dict()
                        index = repeat_state[i]['__index__']

                        # SOURCE LINE 142
                        __M_writer(
                            u'\n                          <div class="form-title-row"><b>File Contents for '
                        )
                        # SOURCE LINE 143
                        __M_writer(
                            unicode(
                                input.title_by_index(trans, i, other_values)))
                        __M_writer(u'</b></div>\n                          ')
                        # SOURCE LINE 144
                        __M_writer(
                            unicode(
                                do_inputs(
                                    input.inputs, repeat_state[i], rep_errors,
                                    prefix + input.name + "_" + str(index) +
                                    "|", other_values)))
                        __M_writer(u'\n')
                        # SOURCE LINE 146
                        __M_writer(u'                          </div>\n')
                        pass
                    # SOURCE LINE 149
                    __M_writer(u'                    </div>\n')
                    pass
                # SOURCE LINE 151
            else:
                # SOURCE LINE 152
                __M_writer(u'                ')
                __M_writer(
                    unicode(
                        row_for_param(prefix, input, tool_state, errors,
                                      other_values)))
                __M_writer(u'\n')
                pass
            pass
        # SOURCE LINE 155
        __M_writer(u'    ')
        return ''
    finally:
        context.caller_stack._pop_frame()
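For orientation, the nested-prefix scheme these generated templates rely on fits in a few lines of plain Python. The helper below is hypothetical (it is not part of Galaxy), but the "name_index|" convention matches the prefix + input.name + "_" + str(index) + "|" expressions above and the flat keys (e.g. 'b_0|c') that populate_state parses in the next example.

# Hypothetical helper illustrating the repeat prefix scheme; not Galaxy code.
def build_repeat_prefix(prefix, name, index):
    # Repeat item 0 of group "b" under an empty prefix yields "b_0|", so a
    # child text parameter "c" is posted under the flat key "b_0|c".
    return prefix + name + "_" + str(index) + "|"

assert build_repeat_prefix("", "b", 0) + "c" == "b_0|c"
assert build_repeat_prefix(build_repeat_prefix("", "b", 0), "d", 0) + "e" == "b_0|d_0|e"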
Example no. 14
def populate_state(request_context,
                   inputs,
                   incoming,
                   state,
                   errors=None,
                   context=None,
                   check=True,
                   simple_errors=True,
                   input_format='legacy'):
    """
    Populates nested state dict from incoming parameter values.
    >>> from galaxy.util import XML
    >>> from galaxy.util.bunch import Bunch
    >>> from galaxy.tools.parameters.basic import TextToolParameter, BooleanToolParameter
    >>> from galaxy.tools.parameters.grouping import Conditional, Repeat
    >>> trans = Bunch(workflow_building_mode=False)
    >>> a = TextToolParameter(None, XML('<param name="a"/>'))
    >>> b = Repeat()
    >>> b.min = 0
    >>> b.max = 1
    >>> c = TextToolParameter(None, XML('<param name="c"/>'))
    >>> d = Repeat()
    >>> d.min = 0
    >>> d.max = 1
    >>> e = TextToolParameter(None, XML('<param name="e"/>'))
    >>> f = Conditional()
    >>> g = BooleanToolParameter(None, XML('<param name="g"/>'))
    >>> h = TextToolParameter(None, XML('<param name="h"/>'))
    >>> i = TextToolParameter(None, XML('<param name="i"/>'))
    >>> b.name = 'b'
    >>> b.inputs = dict([('c', c), ('d', d)])
    >>> d.name = 'd'
    >>> d.inputs = dict([('e', e), ('f', f)])
    >>> f.test_param = g
    >>> f.name = 'f'
    >>> f.cases = [Bunch(value='true', inputs= { 'h': h }), Bunch(value='false', inputs= { 'i': i })]
    >>> inputs = dict([('a',a),('b',b)])
    >>> flat = dict([('a', 1), ('b_0|c', 2), ('b_0|d_0|e', 3), ('b_0|d_0|f|h', 4), ('b_0|d_0|f|g', True)])
    >>> state = {}
    >>> populate_state(trans, inputs, flat, state, check=False)
    >>> print(state['a'])
    1
    >>> print(state['b'][0]['c'])
    2
    >>> print(state['b'][0]['d'][0]['e'])
    3
    >>> print(state['b'][0]['d'][0]['f']['h'])
    4
    >>> # now test with input_format='21.01'
    >>> nested = {'a': 1, 'b': [{'c': 2, 'd': [{'e': 3, 'f': {'h': 4, 'g': True}}]}]}
    >>> state_new = {}
    >>> populate_state(trans, inputs, nested, state_new, check=False, input_format='21.01')
    >>> print(state_new['a'])
    1
    >>> print(state_new['b'][0]['c'])
    2
    >>> print(state_new['b'][0]['d'][0]['e'])
    3
    >>> print(state_new['b'][0]['d'][0]['f']['h'])
    4

    """
    if errors is None:
        errors = {}
    if input_format == 'legacy':
        _populate_state_legacy(request_context,
                               inputs,
                               incoming,
                               state,
                               errors=errors,
                               context=context,
                               check=check,
                               simple_errors=simple_errors)
        return
    elif input_format == '21.01':
        context = ExpressionContext(state, context)
        for input in inputs.values():
            state[input.name] = input.get_initial_value(
                request_context, context)
            group_state = state[input.name]
            if input.type == 'repeat':
                if len(incoming[input.name]) > input.max or len(incoming[input.name]) < input.min:
                    errors[input.name] = 'The number of repeat elements is outside the range specified by the tool.'
                else:
                    del group_state[:]
                    for rep in incoming[input.name]:
                        new_state = {}
                        group_state.append(new_state)
                        new_errors = {}
                        populate_state(request_context,
                                       input.inputs,
                                       rep,
                                       new_state,
                                       new_errors,
                                       context=context,
                                       check=check,
                                       simple_errors=simple_errors,
                                       input_format=input_format)
                        if new_errors:
                            errors[input.name] = new_errors

            elif input.type == 'conditional':
                test_param_value = incoming.get(input.name, {}).get(input.test_param.name)
                if check:
                    value, error = check_param(request_context,
                                               input.test_param,
                                               test_param_value,
                                               context,
                                               simple_errors=simple_errors)
                else:
                    value, error = test_param_value, None
                if error:
                    errors[input.test_param.name] = error
                else:
                    try:
                        current_case = input.get_current_case(value)
                        group_state = state[input.name] = {}
                        new_errors = {}
                        populate_state(request_context,
                                       input.cases[current_case].inputs,
                                       incoming.get(input.name),
                                       group_state,
                                       new_errors,
                                       context=context,
                                       check=check,
                                       simple_errors=simple_errors,
                                       input_format=input_format)
                        if new_errors:
                            errors[input.name] = new_errors
                        group_state['__current_case__'] = current_case
                    except Exception:
                        errors[input.test_param.name] = 'The selected case is unavailable/invalid.'
                group_state[input.test_param.name] = value

            elif input.type == 'section':
                new_errors = {}
                populate_state(request_context,
                               input.inputs,
                               incoming.get(input.name),
                               group_state,
                               new_errors,
                               context=context,
                               check=check,
                               simple_errors=simple_errors,
                               input_format=input_format)
                if new_errors:
                    errors[input.name] = new_errors

            elif input.type == 'upload_dataset':
                raise NotImplementedError

            else:
                param_value = _get_incoming_value(incoming, input.name,
                                                  state.get(input.name))
                if check:
                    value, error = check_param(request_context,
                                               input,
                                               param_value,
                                               context,
                                               simple_errors=simple_errors)
                else:
                    value, error = param_value, None
                if error:
                    errors[input.name] = error
                state[input.name] = value
    else:
        raise Exception(
            f'Input format {input_format} not recognized; input_format must be either legacy or 21.01.'
        )
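The doctests above only exercise the happy path; the repeat branch of the '21.01' format also enforces min/max bounds regardless of the check flag. A minimal sketch of that error path, reusing the doctest fixtures above (where b.max == 1) and assuming the same Galaxy environment:

>>> nested = {'a': 1, 'b': [{'c': 2}, {'c': 3}]}  # two repeat items, but b.max is 1
>>> state, errors = {}, {}
>>> populate_state(trans, inputs, nested, state, errors=errors, check=False, input_format='21.01')
>>> print(errors['b'])
The number of repeat elements is outside the range specified by the tool.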
Example no. 15
def set_metadata_portable():
    import galaxy.model
    tool_job_working_directory = os.path.abspath(os.getcwd())
    metadata_tmp_files_dir = os.path.join(tool_job_working_directory,
                                          "metadata")
    galaxy.model.metadata.MetadataTempFile.tmp_dir = metadata_tmp_files_dir

    metadata_params_path = os.path.join("metadata", "params.json")
    try:
        with open(metadata_params_path, "r") as f:
            metadata_params = json.load(f)
    except IOError:
        raise Exception("Failed to find metadata/params.json from cwd [%s]" %
                        tool_job_working_directory)
    datatypes_config = metadata_params["datatypes_config"]
    job_metadata = metadata_params["job_metadata"]
    provided_metadata_style = metadata_params.get("provided_metadata_style")
    max_metadata_value_size = metadata_params.get(
        "max_metadata_value_size") or 0
    outputs = metadata_params["outputs"]

    datatypes_registry = validate_and_load_datatypes_config(datatypes_config)
    tool_provided_metadata = load_job_metadata(job_metadata,
                                               provided_metadata_style)

    def set_meta(new_dataset_instance, file_dict):
        set_meta_with_tool_provided(new_dataset_instance, file_dict,
                                    set_meta_kwds, datatypes_registry,
                                    max_metadata_value_size)

    object_store_conf_path = os.path.join("metadata", "object_store_conf.json")
    extended_metadata_collection = os.path.exists(object_store_conf_path)

    object_store = None
    job_context = None
    version_string = ""

    export_store = None
    if extended_metadata_collection:
        from galaxy.tool_util.parser.stdio import ToolStdioRegex, ToolStdioExitCode
        tool_dict = metadata_params["tool"]
        stdio_exit_code_dicts = tool_dict["stdio_exit_codes"]
        stdio_regex_dicts = tool_dict["stdio_regexes"]
        stdio_exit_codes = list(map(ToolStdioExitCode, stdio_exit_code_dicts))
        stdio_regexes = list(map(ToolStdioRegex, stdio_regex_dicts))

        with open(object_store_conf_path, "r") as f:
            config_dict = json.load(f)
        from galaxy.objectstore import build_object_store_from_config
        assert config_dict is not None
        object_store = build_object_store_from_config(None,
                                                      config_dict=config_dict)
        galaxy.model.Dataset.object_store = object_store

        outputs_directory = os.path.join(tool_job_working_directory, "outputs")
        if not os.path.exists(outputs_directory):
            outputs_directory = tool_job_working_directory

        # TODO: constants...
        if os.path.exists(os.path.join(outputs_directory, "tool_stdout")):
            with open(os.path.join(outputs_directory, "tool_stdout"),
                      "rb") as f:
                tool_stdout = f.read()

            with open(os.path.join(outputs_directory, "tool_stderr"),
                      "rb") as f:
                tool_stderr = f.read()
        elif os.path.exists(os.path.join(outputs_directory, "stdout")):
            # Pulsar style working directory.
            with open(os.path.join(outputs_directory, "stdout"), "rb") as f:
                tool_stdout = f.read()

            with open(os.path.join(outputs_directory, "stderr"), "rb") as f:
                tool_stderr = f.read()

        job_id_tag = metadata_params["job_id_tag"]

        # TODO: this clearly needs to be refactored, nothing in runners should be imported here..
        from galaxy.job_execution.output_collect import default_exit_code_file, read_exit_code_from
        exit_code_file = default_exit_code_file(".", job_id_tag)
        tool_exit_code = read_exit_code_from(exit_code_file, job_id_tag)

        from galaxy.tool_util.output_checker import check_output, DETECTED_JOB_STATE
        check_output_detected_state, tool_stdout, tool_stderr, job_messages = check_output(
            stdio_regexes, stdio_exit_codes, tool_stdout, tool_stderr,
            tool_exit_code, job_id_tag)
        if check_output_detected_state == DETECTED_JOB_STATE.OK and not tool_provided_metadata.has_failed_outputs():
            final_job_state = galaxy.model.Job.states.OK
        else:
            final_job_state = galaxy.model.Job.states.ERROR

        from pulsar.client.staging import COMMAND_VERSION_FILENAME
        version_string = ""
        if os.path.exists(COMMAND_VERSION_FILENAME):
            version_string = open(COMMAND_VERSION_FILENAME).read()

        # TODO: handle outputs_to_working_directory?
        from galaxy.util.expressions import ExpressionContext
        job_context = ExpressionContext(
            dict(stdout=tool_stdout, stderr=tool_stderr))

        # Load outputs.
        import_model_store = store.imported_store_for_metadata(
            'metadata/outputs_new', object_store=object_store)
        export_store = store.DirectoryModelExportStore(
            'metadata/outputs_populated',
            serialize_dataset_objects=True,
            for_edit=True)

    for output_name, output_dict in outputs.items():
        if extended_metadata_collection:
            dataset_instance_id = output_dict["id"]
            dataset = import_model_store.sa_session.query(
                galaxy.model.HistoryDatasetAssociation).find(
                    dataset_instance_id)
            assert dataset is not None
        else:
            filename_in = os.path.join("metadata/metadata_in_%s" % output_name)
            dataset = cPickle.load(open(filename_in,
                                        'rb'))  # load DatasetInstance

        filename_kwds = os.path.join("metadata/metadata_kwds_%s" % output_name)
        filename_out = os.path.join("metadata/metadata_out_%s" % output_name)
        filename_results_code = os.path.join("metadata/metadata_results_%s" %
                                             output_name)
        override_metadata = os.path.join("metadata/metadata_override_%s" %
                                         output_name)
        dataset_filename_override = output_dict["filename_override"]

        # Same block as below...
        set_meta_kwds = stringify_dictionary_keys(
            json.load(open(filename_kwds))
        )  # load kwds; need to ensure our keywords are not unicode
        try:
            dataset.dataset.external_filename = dataset_filename_override
            store_by = metadata_params.get("object_store_store_by", "id")
            extra_files_dir_name = "dataset_%s_files" % getattr(
                dataset.dataset, store_by)
            files_path = os.path.abspath(
                os.path.join(tool_job_working_directory, "working",
                             extra_files_dir_name))
            dataset.dataset.external_extra_files_path = files_path
            file_dict = tool_provided_metadata.get_dataset_meta(
                output_name, dataset.dataset.id, dataset.dataset.uuid)
            if 'ext' in file_dict:
                dataset.extension = file_dict['ext']
            # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles
            override_metadata = json.load(open(override_metadata))
            for metadata_name, metadata_file_override in override_metadata:
                if galaxy.datatypes.metadata.MetadataTempFile.is_JSONified_value(
                        metadata_file_override):
                    metadata_file_override = galaxy.datatypes.metadata.MetadataTempFile.from_JSON(
                        metadata_file_override)
                setattr(dataset.metadata, metadata_name,
                        metadata_file_override)
            if output_dict.get("validate", False):
                set_validated_state(dataset)
            set_meta(dataset, file_dict)

            if extended_metadata_collection:
                meta = tool_provided_metadata.get_dataset_meta(
                    output_name, dataset.dataset.id, dataset.dataset.uuid)
                if meta:
                    context = ExpressionContext(meta, job_context)
                else:
                    context = job_context

                # Lazy and unattached
                # if getattr(dataset, "hidden_beneath_collection_instance", None):
                #    dataset.visible = False
                dataset.blurb = 'done'
                dataset.peek = 'no peek'
                dataset.info = (dataset.info or '')
                if context['stdout'].strip():
                    # Ensure white space between entries
                    dataset.info = dataset.info.rstrip(
                    ) + "\n" + context['stdout'].strip()
                if context['stderr'].strip():
                    # Ensure white space between entries
                    dataset.info = dataset.info.rstrip(
                    ) + "\n" + context['stderr'].strip()
                dataset.tool_version = version_string
                dataset.set_size()
                if 'uuid' in context:
                    dataset.dataset.uuid = context['uuid']
                object_store.update_from_file(dataset.dataset, create=True)
                from galaxy.job_execution.output_collect import collect_extra_files
                collect_extra_files(object_store, dataset, ".")
                if galaxy.model.Job.states.ERROR == final_job_state:
                    dataset.blurb = "error"
                    dataset.mark_unhidden()
                else:
                    # If the tool was expected to set the extension, attempt to retrieve it
                    if dataset.ext == 'auto':
                        dataset.extension = context.get('ext', 'data')
                        dataset.init_meta(copy_from=dataset)

                    # This has already been done:
                    # else:
                    #     self.external_output_metadata.load_metadata(dataset, output_name, self.sa_session, working_directory=self.working_directory, remote_metadata_directory=remote_metadata_directory)
                    line_count = context.get('line_count', None)
                    try:
                        # Certain datatype's set_peek methods contain a line_count argument
                        dataset.set_peek(line_count=line_count)
                    except TypeError:
                        # ... and others don't
                        dataset.set_peek()

                from galaxy.jobs import TOOL_PROVIDED_JOB_METADATA_KEYS
                for context_key in TOOL_PROVIDED_JOB_METADATA_KEYS:
                    if context_key in context:
                        context_value = context[context_key]
                        setattr(dataset, context_key, context_value)

                if extended_metadata_collection:
                    export_store.add_dataset(dataset)
                else:
                    cPickle.dump(dataset, open(filename_out, 'wb+'))
            else:
                dataset.metadata.to_JSON_dict(
                    filename_out)  # write out results of set_meta

            json.dump((True, 'Metadata has been set successfully'),
                      open(filename_results_code,
                           'wt+'))  # setting metadata has succeeded
        except Exception:
            json.dump((False, traceback.format_exc()),
                      open(filename_results_code,
                           'wt+'))  # setting metadata has failed somehow

    if extended_metadata_collection:
        # discover extra outputs...
        from galaxy.job_execution.output_collect import collect_dynamic_outputs, collect_primary_datasets, SessionlessJobContext

        job_context = SessionlessJobContext(
            metadata_params, tool_provided_metadata, object_store,
            export_store, import_model_store,
            os.path.join(tool_job_working_directory, "working"))

        output_collections = {}
        for name, output_collection in metadata_params["output_collections"].items():
            output_collections[name] = import_model_store.sa_session.query(
                galaxy.model.HistoryDatasetCollectionAssociation).find(
                    output_collection["id"])
        outputs = {}
        for name, output in metadata_params["outputs"].items():
            outputs[name] = import_model_store.sa_session.query(
                galaxy.model.HistoryDatasetAssociation).find(output["id"])

        input_ext = json.loads(metadata_params["job_params"].get(
            "__input_ext", '"data"'))
        collect_primary_datasets(
            job_context,
            outputs,
            input_ext=input_ext,
        )
        collect_dynamic_outputs(job_context, output_collections)

    if export_store:
        export_store._finalize()
    write_job_metadata(tool_job_working_directory, job_metadata, set_meta,
                       tool_provided_metadata)
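The shape of the metadata/params.json file this function consumes can be read off the lookups above. The sketch below mirrors those reads; the keys and nesting come from the code, while the values are illustrative placeholders only.

# Illustrative shape of metadata/params.json; keys mirror the reads in
# set_metadata_portable above, values are placeholders.
metadata_params = {
    "datatypes_config": "registry.xml",
    "job_metadata": "working/galaxy.json",
    "provided_metadata_style": "default",  # optional
    "max_metadata_value_size": 0,          # optional
    "job_id_tag": "galaxy_123",
    "object_store_store_by": "id",         # optional, defaults to "id"
    "tool": {"stdio_exit_codes": [], "stdio_regexes": []},
    "outputs": {
        "out_file1": {"id": 42, "filename_override": "/path/to/dataset_42.dat"},
    },
    "output_collections": {},
    "job_params": {"__input_ext": '"data"'},
}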
Example no. 16
    """Create a new `DefaultToolState` for the received tool.  Only inputs on the first page will be initialized."""
    state = galaxy.tools.DefaultToolState()
    state.inputs = {}
    if invalid:
        # We're attempting to display a tool in the tool shed that has been determined to have errors, so is invalid.
        return state
    try:
        # Attempt to generate the tool state using the standard Galaxy-side code
        return tool.new_state(trans)
    except Exception as e:
        # Fall back to building tool state as below
        log.debug(
            'Failed to build tool state for tool "%s" using standard method, will try to fall back on custom method: %s',
            tool.id, e)
    inputs = tool.inputs_by_page[0]
    context = ExpressionContext(state.inputs, parent=None)
    for input in inputs.itervalues():
        try:
            state.inputs[input.name] = input.get_initial_value(trans, context)
        except Exception:
            # FIXME: not all values should be an empty list
            state.inputs[input.name] = []
    return state


def panel_entry_per_tool(tool_section_dict):
    # Return True if tool_section_dict looks like this.
    # {<Tool guid> :
    #    [{ tool_config : <tool_config_file>,
    #       id: <ToolSection id>,
    #       version : <ToolSection version>,
Example no. 17
def render_do_inputs(context, inputs, values, errors, prefix, ctx=None):
    context.caller_stack._push_frame()
    try:

        def row_for_param(param,
                          value,
                          error_dict,
                          prefix,
                          ctx,
                          allow_runtime=True):
            return render_row_for_param(context, param, value, error_dict,
                                        prefix, ctx, allow_runtime)

        def do_inputs(inputs, values, errors, prefix, ctx=None):
            return render_do_inputs(context, inputs, values, errors, prefix,
                                    ctx)

        len = context.get('len', UNDEFINED)
        range = context.get('range', UNDEFINED)
        dict = context.get('dict', UNDEFINED)
        str = context.get('str', UNDEFINED)
        enumerate = context.get('enumerate', UNDEFINED)
        trans = context.get('trans', UNDEFINED)
        ExpressionContext = context.get('ExpressionContext', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 6
        __M_writer(u'\n  ')
        # SOURCE LINE 7
        ctx = ExpressionContext(values, ctx)

        __M_writer(u'\n')
        # SOURCE LINE 8
        for input_index, input in enumerate(inputs.itervalues()):
            # SOURCE LINE 9
            if input.type == "repeat":
                # SOURCE LINE 10
                __M_writer(
                    u'      <div class="repeat-group form-row">\n          <label>'
                )
                # SOURCE LINE 11
                __M_writer(unicode(input.title_plural))
                __M_writer(u':</label>\n          ')
                # SOURCE LINE 12
                repeat_values = values[input.name]

                __M_writer(u'\n')
                # SOURCE LINE 13
                for i in range(len(repeat_values)):
                    # SOURCE LINE 14
                    __M_writer(u'            ')

                    if input.name in errors:
                        rep_errors = errors[input.name][i]
                    else:
                        rep_errors = dict()
                    index = repeat_values[i]['__index__']

                    # SOURCE LINE 20
                    __M_writer(
                        u'\n            <div class="repeat-group-item">\n            <div class="form-title-row"><label>'
                    )
                    # SOURCE LINE 22
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'</label></div>\n            ')
                    # SOURCE LINE 23
                    __M_writer(
                        unicode(
                            do_inputs(
                                input.inputs, repeat_values[i], rep_errors,
                                prefix + input.name + "_" + str(index) + "|",
                                ctx)))
                    __M_writer(
                        u'\n            <div class="form-row"><input type="submit" name="'
                    )
                    # SOURCE LINE 24
                    __M_writer(unicode(prefix))
                    __M_writer(unicode(input.name))
                    __M_writer(u'_')
                    __M_writer(unicode(index))
                    __M_writer(u'_remove" value="Remove ')
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'"></div>\n            </div>\n')
                # SOURCE LINE 27
                __M_writer(
                    u'          <div class="form-row"><input type="submit" name="'
                )
                __M_writer(unicode(prefix))
                __M_writer(unicode(input.name))
                __M_writer(u'_add" value="Add new ')
                __M_writer(unicode(input.title))
                __M_writer(u'"></div>\n      </div>\n')
                # SOURCE LINE 29
            elif input.type == "conditional":
                # SOURCE LINE 30
                __M_writer(u'      ')
                group_values = values[input.name]

                __M_writer(u'\n      ')
                # SOURCE LINE 31
                current_case = group_values['__current_case__']

                __M_writer(u'\n      ')
                # SOURCE LINE 32
                group_prefix = prefix + input.name + "|"

                __M_writer(u'\n      ')
                # SOURCE LINE 33
                group_errors = errors.get(input.name, {})

                __M_writer(u'\n      ')
                # SOURCE LINE 34
                __M_writer(
                    unicode(
                        row_for_param(input.test_param,
                                      group_values[input.test_param.name],
                                      group_errors,
                                      group_prefix,
                                      ctx,
                                      allow_runtime=False)))
                __M_writer(u'\n      ')
                # SOURCE LINE 35
                __M_writer(
                    unicode(
                        do_inputs(input.cases[current_case].inputs,
                                  group_values, group_errors, group_prefix,
                                  ctx)))
                __M_writer(u'\n')
                # SOURCE LINE 36
            else:
                # SOURCE LINE 37
                if input.name in values:
                    # SOURCE LINE 38
                    __M_writer(u'        ')
                    __M_writer(
                        unicode(
                            row_for_param(input, values[input.name], errors,
                                          prefix, ctx)))
                    __M_writer(u'\n')
                    # SOURCE LINE 39
                else:
                    # SOURCE LINE 40
                    __M_writer(u'        ')
                    errors[input.name] = 'Value not stored, displaying default'

                    __M_writer(u'\n        ')
                    # SOURCE LINE 41
                    __M_writer(
                        unicode(
                            row_for_param(
                                input, input.get_initial_value(trans, values),
                                errors, prefix, ctx)))
                    __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
Example no. 18
def render_do_inputs(context,
                     inputs,
                     values,
                     errors,
                     prefix,
                     step,
                     other_values=None,
                     already_used=None):
    context.caller_stack._push_frame()
    try:

        def row_for_param(param, value, other_values, error_dict, prefix, step,
                          already_used):
            return render_row_for_param(context, param, value, other_values,
                                        error_dict, prefix, step, already_used)

        def do_inputs(inputs,
                      values,
                      errors,
                      prefix,
                      step,
                      other_values=None,
                      already_used=None):
            return render_do_inputs(context, inputs, values, errors, prefix,
                                    step, other_values, already_used)

        len = context.get('len', UNDEFINED)
        range = context.get('range', UNDEFINED)
        dict = context.get('dict', UNDEFINED)
        str = context.get('str', UNDEFINED)
        enumerate = context.get('enumerate', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 325
        __M_writer(u'\n  ')
        # SOURCE LINE 326

        from galaxy.util.expressions import ExpressionContext
        other_values = ExpressionContext(values, other_values)

        # SOURCE LINE 329
        __M_writer(u'\n')
        # SOURCE LINE 330
        for input_index, input in enumerate(inputs.itervalues()):
            # SOURCE LINE 331
            if input.type == "repeat":
                # SOURCE LINE 332
                __M_writer(
                    u'      <div class="repeat-group">\n          <div class="form-title-row"><b>'
                )
                # SOURCE LINE 333
                __M_writer(unicode(input.title_plural))
                __M_writer(u'</b></div>\n          ')
                # SOURCE LINE 334
                repeat_values = values[input.name]

                __M_writer(u'\n')
                # SOURCE LINE 335
                for i in range(len(repeat_values)):
                    # SOURCE LINE 336
                    if input.name in errors:
                        # SOURCE LINE 337
                        __M_writer(u'                ')
                        rep_errors = errors[input.name][i]

                        __M_writer(u'\n')
                        # SOURCE LINE 338
                    else:
                        # SOURCE LINE 339
                        __M_writer(u'                ')
                        rep_errors = dict()

                        __M_writer(u'\n')
                        pass
                    # SOURCE LINE 341
                    __M_writer(
                        u'            <div class="repeat-group-item">\n            '
                    )
                    # SOURCE LINE 342
                    index = repeat_values[i]['__index__']

                    __M_writer(
                        u'\n            <div class="form-title-row"><b>')
                    # SOURCE LINE 343
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'</b></div>\n            ')
                    # SOURCE LINE 344
                    __M_writer(
                        unicode(
                            do_inputs(
                                input.inputs, repeat_values[i], rep_errors,
                                prefix + input.name + "_" + str(index) + "|",
                                step, other_values, already_used)))
                    __M_writer(u'\n')
                    # SOURCE LINE 346
                    __M_writer(u'            </div>\n')
                    pass
                # SOURCE LINE 349
                __M_writer(u'      </div>\n')
                # SOURCE LINE 350
            elif input.type == "conditional":
                # SOURCE LINE 351
                if input.is_job_resource_conditional:
                    # SOURCE LINE 352
                    __M_writer(u'        ')
                    continue

                    __M_writer(u'\n')
                    pass
                # SOURCE LINE 354
                __M_writer(u'      ')
                group_values = values[input.name]

                __M_writer(u'\n      ')
                # SOURCE LINE 355
                current_case = group_values['__current_case__']

                __M_writer(u'\n      ')
                # SOURCE LINE 356
                new_prefix = prefix + input.name + "|"

                __M_writer(u'\n      ')
                # SOURCE LINE 357
                group_errors = errors.get(input.name, {})

                __M_writer(
                    u'\n      <span class="conditional-start"></span>\n      ')
                # SOURCE LINE 359
                __M_writer(
                    unicode(
                        row_for_param(input.test_param,
                                      group_values[input.test_param.name],
                                      other_values, group_errors, prefix, step,
                                      already_used)))
                __M_writer(u'\n      ')
                # SOURCE LINE 360
                __M_writer(
                    unicode(
                        do_inputs(input.cases[current_case].inputs,
                                  group_values, group_errors, new_prefix, step,
                                  other_values, already_used)))
                __M_writer(u'\n')
                # SOURCE LINE 361
            else:
                # SOURCE LINE 362
                __M_writer(u'      ')
                __M_writer(
                    unicode(
                        row_for_param(input, values[input.name], other_values,
                                      errors, prefix, step, already_used)))
                __M_writer(u'\n')
                pass
            pass
        return ''
    finally:
        context.caller_stack._pop_frame()
Example no. 19
 def get_dataset_finish_context( self, job_context, dataset ):
     for meta in self.get_tool_provided_job_metadata():
         if meta['type'] == 'dataset' and meta['dataset_id'] == dataset.id:
             return ExpressionContext( meta, job_context )
     return job_context
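ExpressionContext behaves like a chain of dicts: a lookup tries the wrapped dict first and falls through to the parent context when the key is absent, which is why the per-dataset meta dict can shadow the job-wide context here. A minimal sketch of that behaviour, assuming galaxy.util.expressions is importable:

from galaxy.util.expressions import ExpressionContext

job_context = ExpressionContext(dict(stdout='ok', stderr=''))
finish_context = ExpressionContext({'stdout': 'tool says hi'}, job_context)
print(finish_context['stdout'])  # the dataset's own entry shadows the parent: 'tool says hi'
print(finish_context['stderr'])  # falls through to the job context: ''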
Example no. 20
def render_do_inputs(context,
                     inputs,
                     tool_state,
                     errors,
                     prefix,
                     other_values=None):
    context.caller_stack._push_frame()
    try:
        _import_ns = {}
        _mako_get_namespace(context, '__anon_0x5a0dfd0')._populate(
            _import_ns, [u'overlay'])
        _mako_get_namespace(context, '__anon_0x5a0df10')._populate(
            _import_ns, [u'render_msg'])

        def row_for_param(prefix, param, parent_state, parent_errors,
                          other_values):
            return render_row_for_param(context, prefix, param, parent_state,
                                        parent_errors, other_values)

        h = _import_ns.get('h', context.get('h', UNDEFINED))

        def do_inputs(inputs, tool_state, errors, prefix, other_values=None):
            return render_do_inputs(context, inputs, tool_state, errors,
                                    prefix, other_values)

        len = _import_ns.get('len', context.get('len', UNDEFINED))
        range = _import_ns.get('range', context.get('range', UNDEFINED))
        dict = _import_ns.get('dict', context.get('dict', UNDEFINED))
        str = _import_ns.get('str', context.get('str', UNDEFINED))
        enumerate = _import_ns.get('enumerate',
                                   context.get('enumerate', UNDEFINED))
        trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
        __M_writer = context.writer()
        # SOURCE LINE 120
        __M_writer(u'\n    ')
        # SOURCE LINE 121

        from galaxy.util.expressions import ExpressionContext
        other_values = ExpressionContext(tool_state, other_values)

        # SOURCE LINE 124
        __M_writer(u'\n')
        # SOURCE LINE 125
        for input_index, input in enumerate(inputs.itervalues()):
            # SOURCE LINE 126
            if not input.visible:
                # SOURCE LINE 127
                __M_writer(u'            ')
                pass

                __M_writer(u'\n')
                # SOURCE LINE 128
            elif input.type == "repeat":
                # SOURCE LINE 129
                __M_writer(
                    u'          <div class="repeat-group">\n              <div class="form-title-row"><strong>'
                )
                # SOURCE LINE 130
                __M_writer(unicode(input.title_plural))
                __M_writer(u'</strong>\n')
                # SOURCE LINE 131
                if input.help:
                    # SOURCE LINE 132
                    __M_writer(
                        u'                  <div class="toolParamHelp" style="clear: both;">\n                      '
                    )
                    # SOURCE LINE 133
                    __M_writer(unicode(input.help))
                    __M_writer(u'\n                  </div>\n')
                    pass
                # SOURCE LINE 136
                __M_writer(u'              </div>\n              ')
                # SOURCE LINE 137
                repeat_state = tool_state[input.name]

                __M_writer(u'\n')
                # SOURCE LINE 138
                for i in range(len(repeat_state)):
                    # SOURCE LINE 139
                    __M_writer(
                        u'                <div class="repeat-group-item">\n                    '
                    )
                    # SOURCE LINE 140

                    if input.name in errors:
                        rep_errors = errors[input.name][i]
                    else:
                        rep_errors = dict()
                    index = repeat_state[i]['__index__']

                    # SOURCE LINE 146
                    __M_writer(
                        u'\n                    <div class="form-title-row"><strong>'
                    )
                    # SOURCE LINE 147
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'</strong></div>\n                    ')
                    # SOURCE LINE 148
                    __M_writer(
                        unicode(
                            do_inputs(
                                input.inputs, repeat_state[i], rep_errors,
                                prefix + input.name + "_" + str(index) + "|",
                                other_values)))
                    __M_writer(
                        u'\n                    <div class="form-row"><input type="submit" class="btn" name="'
                    )
                    # SOURCE LINE 149
                    __M_writer(unicode(prefix))
                    __M_writer(unicode(input.name))
                    __M_writer(u'_')
                    __M_writer(unicode(index))
                    __M_writer(u'_remove" value="Remove ')
                    __M_writer(unicode(input.title))
                    __M_writer(u' ')
                    __M_writer(unicode(i + 1))
                    __M_writer(u'"></div>\n                </div>\n')
                    # SOURCE LINE 151
                    if '__index__' in rep_errors:
                        # SOURCE LINE 152
                        __M_writer(
                            u'                    <div><img style="vertical-align: middle;" src="'
                        )
                        __M_writer(
                            unicode(
                                h.url_for('/static/style/error_small.png')))
                        __M_writer(
                            u'">&nbsp;<span style="vertical-align: middle;">')
                        __M_writer(unicode(rep_errors['__index__']))
                        __M_writer(u'</span></div>\n')
                        pass
                    pass
                # SOURCE LINE 155
                __M_writer(
                    u'              <div class="form-row"><input type="submit" class="btn" name="'
                )
                __M_writer(unicode(prefix))
                __M_writer(unicode(input.name))
                __M_writer(u'_add" value="Add new ')
                __M_writer(unicode(input.title))
                __M_writer(u'"></div>\n          </div>\n')
                # SOURCE LINE 157
            elif input.type == "conditional":
                # SOURCE LINE 158
                __M_writer(u'            ')

                group_state = tool_state[input.name]
                group_errors = errors.get(input.name, {})
                current_case = group_state['__current_case__']
                group_prefix = prefix + input.name + "|"

                # SOURCE LINE 163
                __M_writer(u'\n')
                # SOURCE LINE 164
                if input.value_ref_in_group:
                    # SOURCE LINE 165
                    __M_writer(u'                ')
                    __M_writer(
                        unicode(
                            row_for_param(group_prefix, input.test_param,
                                          group_state, group_errors,
                                          other_values)))
                    __M_writer(u'\n')
                    pass
                # SOURCE LINE 167
                __M_writer(u'            ')
                __M_writer(
                    unicode(
                        do_inputs(input.cases[current_case].inputs,
                                  group_state, group_errors, group_prefix,
                                  other_values)))
                __M_writer(u'\n')
                # SOURCE LINE 168
            elif input.type == "upload_dataset":
                # SOURCE LINE 169
                if input.get_datatype(trans, other_values).composite_type is None:  # have non-composite upload appear as before
                    # SOURCE LINE 170
                    __M_writer(u'                ')

                    if input.name in errors:
                        rep_errors = errors[input.name][0]
                    else:
                        rep_errors = dict()

                    # SOURCE LINE 175
                    __M_writer(u'\n              ')
                    # SOURCE LINE 176
                    __M_writer(
                        unicode(
                            do_inputs(input.inputs, tool_state[input.name][0],
                                      rep_errors,
                                      prefix + input.name + "_" + str(0) + "|",
                                      other_values)))
                    __M_writer(u'\n')
                    # SOURCE LINE 177
                else:
                    # SOURCE LINE 178
                    __M_writer(
                        u'                <div class="repeat-group">\n                    <div class="form-title-row"><strong>'
                    )
                    # SOURCE LINE 179
                    __M_writer(unicode(input.group_title(other_values)))
                    __M_writer(u'</strong></div>\n                    ')
                    # SOURCE LINE 180

                    repeat_state = tool_state[input.name]

                    # SOURCE LINE 182
                    __M_writer(u'\n')
                    # SOURCE LINE 183
                    for i in range(len(repeat_state)):
                        # SOURCE LINE 184
                        __M_writer(
                            u'                      <div class="repeat-group-item">\n                      '
                        )
                        # SOURCE LINE 185

                        if input.name in errors:
                            rep_errors = errors[input.name][i]
                        else:
                            rep_errors = dict()
                        index = repeat_state[i]['__index__']

                        # SOURCE LINE 191
                        __M_writer(
                            u'\n                      <div class="form-title-row"><strong>File Contents for '
                        )
                        # SOURCE LINE 192
                        __M_writer(
                            unicode(
                                input.title_by_index(trans, i, other_values)))
                        __M_writer(u'</strong></div>\n                      ')
                        # SOURCE LINE 193
                        __M_writer(
                            unicode(
                                do_inputs(
                                    input.inputs, repeat_state[i], rep_errors,
                                    prefix + input.name + "_" + str(index) +
                                    "|", other_values)))
                        __M_writer(u'\n')
                        # SOURCE LINE 195
                        __M_writer(u'                      </div>\n')
                        pass
                    # SOURCE LINE 198
                    __M_writer(u'                </div>\n')
                    pass
                # SOURCE LINE 200
            else:
                # SOURCE LINE 201
                __M_writer(u'            ')
                __M_writer(
                    unicode(
                        row_for_param(prefix, input, tool_state, errors,
                                      other_values)))
                __M_writer(u'\n')
                pass
            pass
        return ''
    finally:
        context.caller_stack._pop_frame()
Example no. 21
def set_metadata_portable():
    tool_job_working_directory = os.path.abspath(os.getcwd())
    metadata_tmp_files_dir = os.path.join(tool_job_working_directory,
                                          "metadata")
    MetadataTempFile.tmp_dir = metadata_tmp_files_dir

    metadata_params_path = os.path.join("metadata", "params.json")
    try:
        with open(metadata_params_path) as f:
            metadata_params = json.load(f)
    except OSError:
        raise Exception(
            f"Failed to find metadata/params.json from cwd [{tool_job_working_directory}]"
        )
    datatypes_config = metadata_params["datatypes_config"]
    job_metadata = metadata_params["job_metadata"]
    provided_metadata_style = metadata_params.get("provided_metadata_style")
    max_metadata_value_size = metadata_params.get(
        "max_metadata_value_size") or 0
    outputs = metadata_params["outputs"]

    datatypes_registry = validate_and_load_datatypes_config(datatypes_config)
    tool_provided_metadata = load_job_metadata(job_metadata,
                                               provided_metadata_style)

    def set_meta(new_dataset_instance, file_dict):
        set_meta_with_tool_provided(new_dataset_instance, file_dict,
                                    set_meta_kwds, datatypes_registry,
                                    max_metadata_value_size)

    object_store_conf_path = os.path.join("metadata", "object_store_conf.json")
    extended_metadata_collection = os.path.exists(object_store_conf_path)

    object_store = None
    job_context = None
    version_string = ""

    export_store = None
    final_job_state = Job.states.OK
    if extended_metadata_collection:
        tool_dict = metadata_params["tool"]
        stdio_exit_code_dicts = tool_dict["stdio_exit_codes"]
        stdio_regex_dicts = tool_dict["stdio_regexes"]
        stdio_exit_codes = list(map(ToolStdioExitCode, stdio_exit_code_dicts))
        stdio_regexes = list(map(ToolStdioRegex, stdio_regex_dicts))

        with open(object_store_conf_path) as f:
            config_dict = json.load(f)
        assert config_dict is not None
        object_store = build_object_store_from_config(None,
                                                      config_dict=config_dict)
        Dataset.object_store = object_store

        outputs_directory = os.path.join(tool_job_working_directory, "outputs")
        if not os.path.exists(outputs_directory):
            outputs_directory = tool_job_working_directory

        # TODO: constants...
        if os.path.exists(os.path.join(outputs_directory, "tool_stdout")):
            with open(os.path.join(outputs_directory, "tool_stdout"),
                      "rb") as f:
                tool_stdout = f.read()

            with open(os.path.join(outputs_directory, "tool_stderr"),
                      "rb") as f:
                tool_stderr = f.read()
        elif os.path.exists(os.path.join(tool_job_working_directory,
                                         "stdout")):
            with open(os.path.join(tool_job_working_directory, "stdout"),
                      "rb") as f:
                tool_stdout = f.read()

            with open(os.path.join(tool_job_working_directory, "stderr"),
                      "rb") as f:
                tool_stderr = f.read()
        elif os.path.exists(os.path.join(outputs_directory, "stdout")):
            # Pulsar style output directory? Was this ever used - did this ever work?
            with open(os.path.join(outputs_directory, "stdout"), "rb") as f:
                tool_stdout = f.read()

            with open(os.path.join(outputs_directory, "stderr"), "rb") as f:
                tool_stderr = f.read()
        else:
            wdc = os.listdir(tool_job_working_directory)
            odc = os.listdir(outputs_directory)
            error_desc = "Failed to find tool_stdout or tool_stderr for this job, cannot collect metadata"
            error_extra = f"Working dir contents [{wdc}], output directory contents [{odc}]"
            log.warning(f"{error_desc}. {error_extra}")
            raise Exception(error_desc)

        job_id_tag = metadata_params["job_id_tag"]

        exit_code_file = default_exit_code_file(".", job_id_tag)
        tool_exit_code = read_exit_code_from(exit_code_file, job_id_tag)

        check_output_detected_state, tool_stdout, tool_stderr, job_messages = check_output(
            stdio_regexes, stdio_exit_codes, tool_stdout, tool_stderr,
            tool_exit_code, job_id_tag)
        if check_output_detected_state == DETECTED_JOB_STATE.OK and not tool_provided_metadata.has_failed_outputs():
            final_job_state = Job.states.OK
        else:
            final_job_state = Job.states.ERROR

        version_string = ""
        if os.path.exists(COMMAND_VERSION_FILENAME):
            with open(COMMAND_VERSION_FILENAME) as f:
                version_string = f.read()

        expression_context = ExpressionContext(
            dict(stdout=tool_stdout, stderr=tool_stderr))

        # Load outputs.
        export_store = store.DirectoryModelExportStore(
            'metadata/outputs_populated',
            serialize_dataset_objects=True,
            for_edit=True,
            strip_metadata_files=False,
            serialize_jobs=False)
    try:
        import_model_store = store.imported_store_for_metadata(
            'metadata/outputs_new', object_store=object_store)
    except AssertionError:
        # Remove in 21.09; this should only happen for jobs that started on <= 20.09 and are finishing now
        import_model_store = None

    job_context = SessionlessJobContext(
        metadata_params,
        tool_provided_metadata,
        object_store,
        export_store,
        import_model_store,
        os.path.join(tool_job_working_directory, "working"),
        final_job_state=final_job_state,
    )

    unnamed_id_to_path = {}
    for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs():
        destination = unnamed_output_dict["destination"]
        elements = unnamed_output_dict["elements"]
        destination_type = destination["type"]
        if destination_type == 'hdas':
            for element in elements:
                filename = element.get('filename')
                if filename:
                    unnamed_id_to_path[element['object_id']] = os.path.join(
                        job_context.job_working_directory, filename)

    for output_name, output_dict in outputs.items():
        dataset_instance_id = output_dict["id"]
        klass = getattr(
            galaxy.model,
            output_dict.get('model_class', 'HistoryDatasetAssociation'))
        dataset = None
        if import_model_store:
            dataset = import_model_store.sa_session.query(klass).find(
                dataset_instance_id)
        if dataset is None:
            # legacy check for jobs that started before 21.01; remove in 21.05
            filename_in = os.path.join(f"metadata/metadata_in_{output_name}")
            import pickle
            dataset = pickle.load(open(filename_in,
                                       'rb'))  # load DatasetInstance
        assert dataset is not None

        filename_kwds = os.path.join(f"metadata/metadata_kwds_{output_name}")
        filename_out = os.path.join(f"metadata/metadata_out_{output_name}")
        filename_results_code = os.path.join(
            f"metadata/metadata_results_{output_name}")
        override_metadata = os.path.join(
            f"metadata/metadata_override_{output_name}")
        dataset_filename_override = output_dict["filename_override"]
        # pre-20.05 this was a per job parameter and not a per dataset parameter, drop in 21.XX
        legacy_object_store_store_by = metadata_params.get(
            "object_store_store_by", "id")

        # Same block as below...
        set_meta_kwds = stringify_dictionary_keys(
            json.load(open(filename_kwds))
        )  # load kwds; need to ensure our keywords are not unicode
        try:
            dataset.dataset.external_filename = unnamed_id_to_path.get(
                dataset_instance_id, dataset_filename_override)
            store_by = output_dict.get("object_store_store_by",
                                       legacy_object_store_store_by)
            extra_files_dir_name = f"dataset_{getattr(dataset.dataset, store_by)}_files"
            files_path = os.path.abspath(
                os.path.join(tool_job_working_directory, "working",
                             extra_files_dir_name))
            dataset.dataset.external_extra_files_path = files_path
            file_dict = tool_provided_metadata.get_dataset_meta(
                output_name, dataset.dataset.id, dataset.dataset.uuid)
            if 'ext' in file_dict:
                dataset.extension = file_dict['ext']
            # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles
            override_metadata = json.load(open(override_metadata))
            for metadata_name, metadata_file_override in override_metadata:
                if MetadataTempFile.is_JSONified_value(metadata_file_override):
                    metadata_file_override = MetadataTempFile.from_JSON(
                        metadata_file_override)
                setattr(dataset.metadata, metadata_name,
                        metadata_file_override)
            if output_dict.get("validate", False):
                set_validated_state(dataset)
            if dataset_instance_id not in unnamed_id_to_path:
                # We're going to run through set_metadata in collect_dynamic_outputs with more contextual metadata,
                # so skip set_meta here.
                set_meta(dataset, file_dict)

            if extended_metadata_collection:
                meta = tool_provided_metadata.get_dataset_meta(
                    output_name, dataset.dataset.id, dataset.dataset.uuid)
                if meta:
                    context = ExpressionContext(meta, expression_context)
                else:
                    context = expression_context

                # Lazy and unattached
                # if getattr(dataset, "hidden_beneath_collection_instance", None):
                #    dataset.visible = False
                dataset.blurb = 'done'
                dataset.peek = 'no peek'
                dataset.info = (dataset.info or '')
                if context['stdout'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stdout'].strip()}"
                if context['stderr'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stderr'].strip()}"
                dataset.tool_version = version_string
                dataset.set_size()
                if 'uuid' in context:
                    dataset.dataset.uuid = context['uuid']
                if dataset_filename_override and dataset_filename_override != dataset.file_name:
                    # This has to be a job with outputs_to_working_directory set.
                    # We update the object store with the created output file.
                    object_store.update_from_file(
                        dataset.dataset,
                        file_name=dataset_filename_override,
                        create=True)
                collect_extra_files(object_store, dataset, ".")
                if Job.states.ERROR == final_job_state:
                    dataset.blurb = "error"
                    dataset.mark_unhidden()
                else:
                    # If the tool was expected to set the extension, attempt to retrieve it
                    if dataset.ext == 'auto':
                        dataset.extension = context.get('ext', 'data')
                        dataset.init_meta(copy_from=dataset)

                    # This has already been done:
                    # else:
                    #     self.external_output_metadata.load_metadata(dataset, output_name, self.sa_session, working_directory=self.working_directory, remote_metadata_directory=remote_metadata_directory)
                    line_count = context.get('line_count', None)
                    try:
                        # Certain datatypes' set_peek methods accept a line_count argument
                        dataset.set_peek(line_count=line_count)
                    except TypeError:
                        # ... and others don't
                        dataset.set_peek()

                for context_key in TOOL_PROVIDED_JOB_METADATA_KEYS:
                    if context_key in context:
                        context_value = context[context_key]
                        setattr(dataset, context_key, context_value)
                # We never want to persist the external_filename.
                dataset.dataset.external_filename = None
                export_store.add_dataset(dataset)
            else:
                dataset.metadata.to_JSON_dict(
                    filename_out)  # write out results of set_meta

            json.dump((True, 'Metadata has been set successfully'),
                      open(filename_results_code,
                           'wt+'))  # setting metadata has succeeded
        except Exception:
            json.dump((False, traceback.format_exc()),
                      open(filename_results_code,
                           'wt+'))  # setting metadata has failed somehow

    if extended_metadata_collection:
        # discover extra outputs...
        output_collections = {}
        for name, output_collection in metadata_params["output_collections"].items():
            output_collections[name] = import_model_store.sa_session.query(
                HistoryDatasetCollectionAssociation).find(
                    output_collection["id"])
        outputs = {}
        for name, output in metadata_params["outputs"].items():
            klass = getattr(
                galaxy.model,
                output.get('model_class', 'HistoryDatasetAssociation'))
            outputs[name] = import_model_store.sa_session.query(klass).find(
                output["id"])

        input_ext = json.loads(metadata_params["job_params"].get(
            "__input_ext", '"data"'))
        collect_primary_datasets(
            job_context,
            outputs,
            input_ext=input_ext,
        )
        collect_dynamic_outputs(job_context, output_collections)

    if export_store:
        export_store._finalize()
    write_job_metadata(tool_job_working_directory, job_metadata, set_meta,
                       tool_provided_metadata)
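
The ExpressionContext(meta, expression_context) chaining used in these examples makes per-dataset metadata shadow the job-wide stdout/stderr values. A minimal sketch of that chained-lookup behavior, using collections.ChainMap as a stand-in for Galaxy's ExpressionContext (an illustration of the lookup rule only, not the real implementation):

from collections import ChainMap

# Parent layer: values shared by every output of the job.
job_layer = {"stdout": "42 lines processed", "stderr": ""}
# Child layer: values the tool reported for one particular dataset.
dataset_layer = {"ext": "tabular", "name": "Filtered hits"}

# Approximates ExpressionContext(dataset_layer, job_layer): lookups try the
# dataset layer first, then fall through to the job-wide layer.
context = ChainMap(dataset_layer, job_layer)

assert context["ext"] == "tabular"                 # found in the dataset layer
assert context["stdout"] == "42 lines processed"   # falls through to the job layer
assert context.get("line_count") is None           # absent from both layers
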
Example n. 22
0
def set_metadata_portable():
    tool_job_working_directory = os.path.abspath(os.getcwd())
    metadata_tmp_files_dir = os.path.join(tool_job_working_directory, "metadata")
    MetadataTempFile.tmp_dir = metadata_tmp_files_dir

    metadata_params = get_metadata_params(tool_job_working_directory)
    datatypes_config = metadata_params["datatypes_config"]
    job_metadata = metadata_params["job_metadata"]
    provided_metadata_style = metadata_params.get("provided_metadata_style")
    max_metadata_value_size = metadata_params.get("max_metadata_value_size") or 0
    max_discovered_files = metadata_params.get("max_discovered_files")
    outputs = metadata_params["outputs"]

    datatypes_registry = validate_and_load_datatypes_config(datatypes_config)
    tool_provided_metadata = load_job_metadata(job_metadata, provided_metadata_style)

    def set_meta(new_dataset_instance, file_dict):
        set_meta_with_tool_provided(new_dataset_instance, file_dict, set_meta_kwds, datatypes_registry, max_metadata_value_size)

    try:
        object_store = get_object_store(tool_job_working_directory=tool_job_working_directory)
    except (FileNotFoundError, AssertionError):
        object_store = None
    extended_metadata_collection = bool(object_store)
    job_context = None
    version_string = None

    export_store = None
    final_job_state = Job.states.OK
    job_messages = []
    if extended_metadata_collection:
        tool_dict = metadata_params["tool"]
        stdio_exit_code_dicts, stdio_regex_dicts = tool_dict["stdio_exit_codes"], tool_dict["stdio_regexes"]
        stdio_exit_codes = list(map(ToolStdioExitCode, stdio_exit_code_dicts))
        stdio_regexes = list(map(ToolStdioRegex, stdio_regex_dicts))

        outputs_directory = os.path.join(tool_job_working_directory, "outputs")
        if not os.path.exists(outputs_directory):
            outputs_directory = tool_job_working_directory

        # TODO: constants...
        locations = [
            (outputs_directory, 'tool_'),
            (tool_job_working_directory, ''),
            (outputs_directory, ''),  # Pulsar style output directory? Was this ever used - did this ever work?
        ]
        for directory, prefix in locations:
            if os.path.exists(os.path.join(directory, f"{prefix}stdout")):
                with open(os.path.join(directory, f"{prefix}stdout"), 'rb') as f:
                    tool_stdout = f.read(MAX_STDIO_READ_BYTES)
                with open(os.path.join(directory, f"{prefix}stderr"), 'rb') as f:
                    tool_stderr = f.read(MAX_STDIO_READ_BYTES)
                break
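        # This "else" belongs to the for loop: it runs only when no stdout/stderr location matched (no break occurred).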
        else:
            if os.path.exists(os.path.join(tool_job_working_directory, 'task_0')):
                # We have a task splitting job
                tool_stdout = b''
                tool_stderr = b''
                paths = Path(tool_job_working_directory).glob('task_*')
                for path in paths:
                    with open(path / 'outputs' / 'tool_stdout', 'rb') as f:
                        task_stdout = f.read(MAX_STDIO_READ_BYTES)
                        if task_stdout:
                            tool_stdout = b"%s[%s stdout]\n%s\n" % (tool_stdout, path.name.encode(), task_stdout)
                    with open(path / 'outputs' / 'tool_stderr', 'rb') as f:
                        task_stderr = f.read(MAX_STDIO_READ_BYTES)
                        if task_stderr:
                            tool_stderr = b"%s[%s stderr]\n%s\n" % (tool_stderr, path.name.encode(), task_stderr)
            else:
                wdc = os.listdir(tool_job_working_directory)
                odc = os.listdir(outputs_directory)
                error_desc = "Failed to find tool_stdout or tool_stderr for this job, cannot collect metadata"
                error_extra = f"Working dir contents [{wdc}], output directory contents [{odc}]"
                log.warning(f"{error_desc}. {error_extra}")
                raise Exception(error_desc)

        job_id_tag = metadata_params["job_id_tag"]

        exit_code_file = default_exit_code_file(".", job_id_tag)
        tool_exit_code = read_exit_code_from(exit_code_file, job_id_tag)

        check_output_detected_state, tool_stdout, tool_stderr, job_messages = check_output(stdio_regexes, stdio_exit_codes, tool_stdout, tool_stderr, tool_exit_code, job_id_tag)
        if check_output_detected_state == DETECTED_JOB_STATE.OK and not tool_provided_metadata.has_failed_outputs():
            final_job_state = Job.states.OK
        else:
            final_job_state = Job.states.ERROR

        version_string_path = os.path.join('outputs', COMMAND_VERSION_FILENAME)
        version_string = collect_shrinked_content_from_path(version_string_path)

        # Only a short (255-character) prefix of stdout/stderr is exposed to the per-output expression context.
        expression_context = ExpressionContext(dict(stdout=tool_stdout[:255], stderr=tool_stderr[:255]))

        # Load outputs.
        export_store = store.DirectoryModelExportStore('metadata/outputs_populated', serialize_dataset_objects=True, for_edit=True, strip_metadata_files=False, serialize_jobs=True)
    try:
        import_model_store = store.imported_store_for_metadata('metadata/outputs_new', object_store=object_store)
    except AssertionError:
        # Remove in 21.09; this should only happen for jobs that started on <= 20.09 and are finishing now
        import_model_store = None

    tool_script_file = os.path.join(tool_job_working_directory, 'tool_script.sh')
    job = None
    if import_model_store and export_store:
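        # The imported metadata store is expected to contain exactly one Job: the one being finalized.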
        job = next(iter(import_model_store.sa_session.objects[Job].values()))

    job_context = SessionlessJobContext(
        metadata_params,
        tool_provided_metadata,
        object_store,
        export_store,
        import_model_store,
        os.path.join(tool_job_working_directory, "working"),
        final_job_state=final_job_state,
        max_discovered_files=max_discovered_files,
    )

    if extended_metadata_collection:
        # discover extra outputs...
        output_collections = {}
        for name, output_collection in metadata_params["output_collections"].items():
            # TODO: remove HistoryDatasetCollectionAssociation fallback in 22.01; model_class was not serialized prior to 21.09
            model_class = output_collection.get('model_class', 'HistoryDatasetCollectionAssociation')
            collection = import_model_store.sa_session.query(getattr(galaxy.model, model_class)).find(output_collection["id"])
            output_collections[name] = collection
        output_instances = {}
        for name, output in metadata_params["outputs"].items():
            klass = getattr(galaxy.model, output.get('model_class', 'HistoryDatasetAssociation'))
            output_instances[name] = import_model_store.sa_session.query(klass).find(output["id"])

        input_ext = json.loads(metadata_params["job_params"].get("__input_ext") or '"data"')
        try:
            collect_primary_datasets(
                job_context,
                output_instances,
                input_ext=input_ext,
            )
            collect_dynamic_outputs(job_context, output_collections)
        except MaxDiscoveredFilesExceededError as e:
            final_job_state = Job.states.ERROR
            job_messages.append(str(e))
        if job:
            job.job_messages = job_messages
            job.state = final_job_state
        if os.path.exists(tool_script_file):
            with open(tool_script_file) as command_fh:
                command_line_lines = []
                for i, line in enumerate(command_fh):
                    if i == 0 and line.rstrip().endswith('COMMAND_VERSION 2>&1;'):
                        # Don't record version command as part of command line
                        continue
                    command_line_lines.append(line)
                job.command_line = "".join(command_line_lines).strip()
                export_store.export_job(job, include_job_data=False)

    unnamed_id_to_path = {}
    for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs():
        destination = unnamed_output_dict["destination"]
        elements = unnamed_output_dict["elements"]
        destination_type = destination["type"]
        if destination_type == 'hdas':
            for element in elements:
                filename = element.get('filename')
                object_id = element.get('object_id')
                if filename and object_id:
                    unnamed_id_to_path[object_id] = os.path.join(job_context.job_working_directory, filename)

    for output_name, output_dict in outputs.items():
        dataset_instance_id = output_dict["id"]
        klass = getattr(galaxy.model, output_dict.get('model_class', 'HistoryDatasetAssociation'))
        dataset = None
        if import_model_store:
            dataset = import_model_store.sa_session.query(klass).find(dataset_instance_id)
        if dataset is None:
            # legacy check for jobs that started before 21.01; remove in 21.05
            filename_in = os.path.join(f"metadata/metadata_in_{output_name}")
            import pickle
            dataset = pickle.load(open(filename_in, 'rb'))  # load DatasetInstance
        assert dataset is not None

        filename_kwds = os.path.join(f"metadata/metadata_kwds_{output_name}")
        filename_out = os.path.join(f"metadata/metadata_out_{output_name}")
        filename_results_code = os.path.join(f"metadata/metadata_results_{output_name}")
        override_metadata = os.path.join(f"metadata/metadata_override_{output_name}")
        dataset_filename_override = output_dict["filename_override"]
        # pre-20.05 this was a per job parameter and not a per dataset parameter, drop in 21.XX
        legacy_object_store_store_by = metadata_params.get("object_store_store_by", "id")

        # Same block as below...
        set_meta_kwds = stringify_dictionary_keys(json.load(open(filename_kwds)))  # load kwds; need to ensure our keywords are not unicode
        try:
            external_filename = unnamed_id_to_path.get(dataset_instance_id, dataset_filename_override)
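            # If the override is not a literal path, treat it as a glob that must match exactly one file inside the working directory.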
            if not os.path.exists(external_filename):
                matches = glob.glob(external_filename)
                assert len(matches) == 1, f"Expected exactly one file to match output glob '{external_filename}', found {len(matches)}"
                external_filename = matches[0]
                assert safe_contains(tool_job_working_directory, external_filename), f"Cannot collect output '{external_filename}' from outside of working directory"
                created_from_basename = os.path.relpath(external_filename, os.path.join(tool_job_working_directory, 'working'))
                dataset.dataset.created_from_basename = created_from_basename
            # Override the filename when outputs were written to the working directory and the dataset is not linked-in data.
            link_data_only = metadata_params.get("link_data_only")
            if not link_data_only:
                # Only set external filename if we're dealing with files in job working directory.
                # Fixes link_data_only uploads
                dataset.dataset.external_filename = external_filename
                store_by = output_dict.get("object_store_store_by", legacy_object_store_store_by)
                extra_files_dir_name = f"dataset_{getattr(dataset.dataset, store_by)}_files"
                files_path = os.path.abspath(os.path.join(tool_job_working_directory, "working", extra_files_dir_name))
                dataset.dataset.external_extra_files_path = files_path
            file_dict = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid)
            if 'ext' in file_dict:
                dataset.extension = file_dict['ext']
            # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles
            override_metadata = json.load(open(override_metadata))
            for metadata_name, metadata_file_override in override_metadata:
                if MetadataTempFile.is_JSONified_value(metadata_file_override):
                    metadata_file_override = MetadataTempFile.from_JSON(metadata_file_override)
                setattr(dataset.metadata, metadata_name, metadata_file_override)
            if output_dict.get("validate", False):
                set_validated_state(dataset)
            if dataset_instance_id not in unnamed_id_to_path:
                # We're going to run through set_metadata in collect_dynamic_outputs with more contextual metadata,
                # so skip set_meta here.
                set_meta(dataset, file_dict)
                if extended_metadata_collection:
                    collect_extra_files(object_store, dataset, ".")
                    dataset.state = dataset.dataset.state = final_job_state

            if extended_metadata_collection:
                if not link_data_only and os.path.getsize(external_filename):
                    # Here we might be updating a disk based objectstore when outputs_to_working_directory is used,
                    # or a remote object store from its cache path.
                    object_store.update_from_file(dataset.dataset, file_name=external_filename, create=True)
                # TODO: merge expression_context into tool_provided_metadata so we don't have to special case this (here and in _finish_dataset)
                meta = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid)
                if meta:
                    context = ExpressionContext(meta, expression_context)
                else:
                    context = expression_context
                dataset.blurb = 'done'
                dataset.peek = 'no peek'
                dataset.info = (dataset.info or '')
                if context['stdout'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stdout'].strip()}"
                if context['stderr'].strip():
                    # Ensure white space between entries
                    dataset.info = f"{dataset.info.rstrip()}\n{context['stderr'].strip()}"
                dataset.tool_version = version_string
                if 'uuid' in context:
                    dataset.dataset.uuid = context['uuid']
                if final_job_state != Job.states.ERROR:
                    line_count = context.get('line_count', None)
                    try:
                        # Certain datatypes' set_peek methods accept a line_count argument
                        dataset.set_peek(line_count=line_count)
                    except TypeError:
                        # ... and others don't
                        dataset.set_peek()
                for context_key in TOOL_PROVIDED_JOB_METADATA_KEYS:
                    if context_key in context:
                        context_value = context[context_key]
                        setattr(dataset, context_key, context_value)
                # We only want to persist the external_filename if the dataset has been linked in.
                if not link_data_only:
                    dataset.dataset.external_filename = None
                    dataset.dataset.extra_files_path = None
                export_store.add_dataset(dataset)
            else:
                dataset.metadata.to_JSON_dict(filename_out)  # write out results of set_meta

            json.dump((True, 'Metadata has been set successfully'), open(filename_results_code, 'wt+'))  # setting metadata has succeeded
        except Exception:
            json.dump((False, traceback.format_exc()), open(filename_results_code, 'wt+'))  # setting metadata has failed somehow

    if export_store:
        export_store._finalize()
    write_job_metadata(tool_job_working_directory, job_metadata, set_meta, tool_provided_metadata)
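
Both portable variants above report per-output success by writing a (flag, message) JSON pair to metadata/metadata_results_<output_name>. A minimal sketch of how a caller might read those sidecar files back; the read_metadata_results helper is hypothetical, not part of the API shown here:

import json
import os


def read_metadata_results(metadata_dir, output_names):
    """Collect the (success, message) pair written for each output, if any."""
    results = {}
    for output_name in output_names:
        results_path = os.path.join(metadata_dir, f"metadata_results_{output_name}")
        if not os.path.exists(results_path):
            results[output_name] = (False, "no results file written")
            continue
        with open(results_path) as f:
            success, message = json.load(f)
        results[output_name] = (success, message)
    return results


# e.g. read_metadata_results("metadata", ["out_file1"])
# -> {"out_file1": (True, "Metadata has been set successfully")}
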
Example n. 23
0
def initialize_state(trans, inputs, state, context=None):
    context = ExpressionContext(state, context)
    for input in inputs.values():
        state[input.name] = input.get_initial_value(trans, context)
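
A tiny usage sketch for initialize_state with stub objects: StubInput is hypothetical and only mimics the name/get_initial_value interface the snippet relies on, and the ExpressionContext import is assumed to resolve against a Galaxy checkout:

from collections import OrderedDict

# Assumed import; initialize_state above constructs an ExpressionContext internally.
from galaxy.util.expressions import ExpressionContext  # noqa: F401


class StubInput:
    """Hypothetical stand-in exposing the interface initialize_state expects."""

    def __init__(self, name, default):
        self.name = name
        self.default = default

    def get_initial_value(self, trans, context):
        return self.default


inputs = OrderedDict((name, StubInput(name, default))
                     for name, default in [("threshold", 0.05), ("iterations", 10)])
state = {}
initialize_state(trans=None, inputs=inputs, state=state, context=None)
assert state == {"threshold": 0.05, "iterations": 10}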