Example #1
File: datasets.py  Project: yin-max/galaxy
 def get_content_as_text(self, trans, dataset_id):
     """ Returns item content as Text. """
     decoded_id = self.decode_id(dataset_id)
     dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
     dataset = self.hda_manager.error_if_uploading(dataset)
     if dataset is None:
         raise galaxy_exceptions.MessageException("Dataset not found.")
     truncated, dataset_data = self.hda_manager.text_data(dataset, preview=True)
     item_url = web.url_for(controller='dataset', action='display_by_username_and_slug', username=dataset.history.user.username, slug=trans.security.encode_id(dataset.id), preview=False)
     return {
         "item_data": dataset_data,
         "truncated": truncated,
         "item_url": item_url,
     }
Example #2
    def workflow_dict( self, trans, workflow_id, **kwd ):
        """
        GET /api/workflows/{encoded_workflow_id}/download
        Returns a selected workflow as a json dictionary.
        """
        stored_workflow = self.__get_stored_accessible_workflow( trans, workflow_id )

        style = kwd.get("style", "export")
        ret_dict = self.workflow_contents_manager.workflow_to_dict( trans, stored_workflow, style=style )
        if not ret_dict:
            # This workflow has a tool that's missing from the distribution
            message = "Workflow cannot be exported due to missing tools."
            raise exceptions.MessageException( message )
        return ret_dict
Example #3
def _normalize_inputs(steps, inputs, inputs_by):
    normalized_inputs = {}
    for step in steps:
        if step.type not in INPUT_STEP_TYPES:
            continue
        possible_input_keys = []
        for inputs_by_el in inputs_by.split("|"):
            if inputs_by_el == "step_id":
                possible_input_keys.append(str(step.id))
            elif inputs_by_el == "step_index":
                possible_input_keys.append(str(step.order_index))
            elif inputs_by_el == "step_uuid":
                possible_input_keys.append(str(step.uuid))
            elif inputs_by_el == "name":
                possible_input_keys.append(step.label
                                           or step.tool_inputs.get('name'))
            else:
                raise exceptions.MessageException(
                    "Workflow cannot be run because unexpected inputs_by value specified."
                )
        inputs_key = None
        for possible_input_key in possible_input_keys:
            if possible_input_key in inputs:
                inputs_key = possible_input_key
        default_value = step.tool_inputs.get("default")
        optional = step.tool_inputs.get("optional") or False
        # Need to be careful here to make sure 'default' has correct type - not sure how to do that
        # but asserting 'optional' is definitely a bool and not a String->Bool or something is a good
        # start to ensure tool state is being preserved and loaded in a type safe way.
        assert isinstance(optional, bool)
        if not inputs_key and default_value is None and not optional:
            message = "Workflow cannot be run because an expected input step '{}' ({}) is not optional and no input.".format(
                step.id, step.label)
            raise exceptions.MessageException(message)
        if inputs_key:
            normalized_inputs[step.id] = inputs[inputs_key]
    return normalized_inputs
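
The function above resolves each input step against several candidate keys derived from the inputs_by specification ("step_id", "step_index", "step_uuid", or "name", joined with "|"). A minimal, self-contained sketch of that fallback resolution is shown below; the SimpleNamespace step objects and the inputs dict are hypothetical stand-ins for Galaxy's workflow step model, used only to illustrate how a spec like 'step_index|step_uuid' tries one key form after another.

from types import SimpleNamespace
from uuid import uuid4

# Hypothetical stand-ins for workflow input steps (not Galaxy's real model).
steps = [
    SimpleNamespace(id=101, order_index=0, uuid=uuid4(), label="fastq input",
                    type="data_input", tool_inputs={}),
    SimpleNamespace(id=102, order_index=1, uuid=uuid4(), label=None,
                    type="data_input", tool_inputs={"name": "reference"}),
]

# The caller keys this input on the step index; a step_uuid key would also match.
inputs = {"0": {"src": "hda", "id": "abc123"}}


def resolve_input_key(step, inputs, inputs_by="step_index|step_uuid"):
    """Return the first candidate key (per inputs_by) that is present in inputs."""
    candidates = []
    for inputs_by_el in inputs_by.split("|"):
        if inputs_by_el == "step_id":
            candidates.append(str(step.id))
        elif inputs_by_el == "step_index":
            candidates.append(str(step.order_index))
        elif inputs_by_el == "step_uuid":
            candidates.append(str(step.uuid))
        elif inputs_by_el == "name":
            candidates.append(step.label or step.tool_inputs.get("name"))
    return next((key for key in candidates if key in inputs), None)


for step in steps:
    print(step.id, "->", resolve_input_key(step, inputs))
# Step 101 resolves via its order_index "0"; step 102 resolves to None.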
Example #4
 def __api_import_from_archive(self, trans, archive_data, source=None):
     try:
         data = json.loads(archive_data)
     except Exception:
         if "GalaxyWorkflow" in archive_data:
             data = {"yaml_content": archive_data}
         else:
             raise exceptions.MessageException(
                 "The data content does not appear to be a valid workflow.")
     if not data:
         raise exceptions.MessageException("The data content is missing.")
     raw_workflow_description = self.__normalize_workflow(trans, data)
     workflow, missing_tool_tups = self._workflow_from_dict(
         trans, raw_workflow_description, source=source)
     workflow = workflow.latest_workflow
     if workflow.has_errors:
         return {
             "message":
             "Imported, but some steps in this workflow have validation errors.",
             "status": "error"
         }
     elif len(workflow.steps) == 0:
         return {
             "message": "Imported, but this workflow has no steps.",
             "status": "error"
         }
     elif workflow.has_cycles:
         return {
             "message": "Imported, but this workflow contains cycles.",
             "status": "error"
         }
     return {
         "message":
         "Workflow '%s' imported successfully." % escape(workflow.name),
         "status":
         "success"
     }
Example #5
    def import_shared_workflow(self, trans, payload, **kwd):
        """
        POST /api/workflows/import
        Import a workflow shared by other users.

        :param  workflow_id:      the workflow id (required)
        :type   workflow_id:      str

        :raises: exceptions.MessageException, exceptions.ObjectNotFound
        """
        # Pull parameters out of payload.
        workflow_id = payload.get('workflow_id', None)
        if workflow_id is None:
            raise exceptions.ObjectAttributeMissingException(
                "Missing required parameter 'workflow_id'.")
        try:
            stored_workflow = self.get_stored_workflow(trans,
                                                       workflow_id,
                                                       check_ownership=False)
        except:
            raise exceptions.ObjectNotFound(
                "Malformed workflow id ( %s ) specified." % workflow_id)
        if stored_workflow.importable is False:
            raise exceptions.MessageException(
                'The owner of this workflow has disabled imports via this link.'
            )
        elif stored_workflow.deleted:
            raise exceptions.MessageException(
                "You can't import this workflow because it has been deleted.")
        imported_workflow = self._import_shared_workflow(
            trans, stored_workflow)
        item = imported_workflow.to_dict(
            value_mapper={'id': trans.security.encode_id})
        encoded_id = trans.security.encode_id(imported_workflow.id)
        item['url'] = url_for('workflow', id=encoded_id)
        return item
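
The docstring above documents POST /api/workflows/import, which expects a 'workflow_id' in the payload and raises ObjectAttributeMissingException, ObjectNotFound, or MessageException on the various failure paths. A minimal client-side sketch with the requests library follows; the Galaxy URL, API key, and the choice of passing the key as a query parameter are illustrative assumptions, not part of the original code.

import requests

# Placeholder values - substitute a real Galaxy instance and API key.
GALAXY_URL = "https://galaxy.example.org"
API_KEY = "your-api-key"


def import_shared_workflow(encoded_workflow_id):
    """Import a workflow that another user has shared via link."""
    response = requests.post(
        f"{GALAXY_URL}/api/workflows/import",
        params={"key": API_KEY},
        json={"workflow_id": encoded_workflow_id},
    )
    # A missing id, a malformed id, a disabled share link, or a deleted
    # workflow comes back as an error status with a JSON error message.
    response.raise_for_status()
    return response.json()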
Example #6
def populate_module_and_state(trans,
                              workflow,
                              param_map,
                              allow_tool_state_corrections=False):
    """ Used by API but not web controller, walks through a workflow's steps
    and populates transient module and state attributes on each.
    """
    module_injector = WorkflowModuleInjector(trans)
    for step in workflow.steps:
        step_args = param_map.get(step.id, {})
        step_errors = module_injector.inject(step,
                                             step_args=step_args,
                                             source="json")
        if step.type == 'tool' or step.type is None:
            if step_errors:
                message = "Workflow cannot be run because of validation errors in some steps: %s" % step_errors
                raise exceptions.MessageException(message)
            if step.upgrade_messages:
                if allow_tool_state_corrections:
                    log.debug('Workflow step "%i" had upgrade messages: %s',
                              step.id, step.upgrade_messages)
                else:
                    message = "Workflow cannot be run because of step upgrade messages: %s" % step.upgrade_messages
                    raise exceptions.MessageException(message)
Example #7
 def __attempt_add_to_linked_match(self, input_name, hdca,
                                   collection_type_description,
                                   subcollection_type):
     structure = get_structure(hdca,
                               collection_type_description,
                               leaf_subcollection_type=subcollection_type)
     if not self.linked_structure:
         self.linked_structure = structure
         self.collections[input_name] = hdca
         self.subcollection_types[input_name] = subcollection_type
     else:
         if not self.linked_structure.can_match(structure):
             raise exceptions.MessageException(CANNOT_MATCH_ERROR_MESSAGE)
         self.collections[input_name] = hdca
         self.subcollection_types[input_name] = subcollection_type
Example #8
    def execute( self, trans, progress, invocation, step ):
        tool = trans.app.toolbox.get_tool( step.tool_id, tool_version=step.tool_version )
        tool_state = step.state
        # Not strictly needed - but keep Tool state clean by stripping runtime
        # metadata parameters from it.
        if RUNTIME_STEP_META_STATE_KEY in tool_state.inputs:
            del tool_state.inputs[ RUNTIME_STEP_META_STATE_KEY ]
        collections_to_match = self._find_collections_to_match( tool, progress, step )
        # Have implicit collections...
        if collections_to_match.has_collections():
            collection_info = self.trans.app.dataset_collections_service.match_collections( collections_to_match )
        else:
            collection_info = None

        param_combinations = []
        if collection_info:
            iteration_elements_iter = collection_info.slice_collections()
        else:
            iteration_elements_iter = [ None ]

        for iteration_elements in iteration_elements_iter:
            execution_state = tool_state.copy()
            # TODO: Move next step into copy()
            execution_state.inputs = make_dict_copy( execution_state.inputs )

            # Connect up
            def callback( input, value, prefixed_name, prefixed_label ):
                replacement = None
                if isinstance( input, DataToolParameter ) or isinstance( input, DataCollectionToolParameter ):
                    if iteration_elements and prefixed_name in iteration_elements:
                        if isinstance( input, DataToolParameter ):
                            # Pull out dataset instance from element.
                            replacement = iteration_elements[ prefixed_name ].dataset_instance
                        else:
                            # If collection - just use element model object.
                            replacement = iteration_elements[ prefixed_name ]
                    else:
                        replacement = progress.replacement_for_tool_input( step, input, prefixed_name )
                return replacement
            try:
                # Replace DummyDatasets with historydatasetassociations
                visit_input_values( tool.inputs, execution_state.inputs, callback )
            except KeyError as k:
                message_template = "Error due to input mapping of '%s' in '%s'.  A common cause of this is conditional outputs that cannot be determined until runtime, please review your workflow."
                message = message_template % (tool.name, str(k))
                raise exceptions.MessageException( message )
            param_combinations.append( execution_state.inputs )
Example #9
    def __authorize_job_access(self, trans, encoded_job_id, **kwargs):
        for key in ["path", "job_key"]:
            if key not in kwargs:
                error_message = "Job files action requires a valid '%s'." % key
                raise exceptions.ObjectAttributeMissingException(error_message)

        job_id = trans.security.decode_id(encoded_job_id)
        job_key = trans.security.encode_id(job_id, kind="jobs_files")
        if not util.safe_str_cmp(kwargs["job_key"], job_key):
            raise exceptions.ItemAccessibilityException(
                "Invalid job_key supplied.")

        # Verify job is active. Don't update the contents of complete jobs.
        job = trans.sa_session.query(model.Job).get(job_id)
        if job.finished:
            error_message = "Attempting to read or modify the files of a job that has already completed."
            raise exceptions.MessageException(error_message)
        return job
Example #10
File: histories.py  Project: maikenp/galaxy
    def get_ready_jeha(self, trans, history_id, jeha_id="latest"):
        history = self._history(trans, history_id)
        matching_exports = history.exports
        if jeha_id != "latest":
            decoded_jeha_id = trans.security.decode_id(jeha_id)
            matching_exports = [
                e for e in matching_exports if e.id == decoded_jeha_id
            ]
        if len(matching_exports) == 0:
            raise glx_exceptions.ObjectNotFound(
                "Failed to find target history export")

        jeha = matching_exports[0]
        if not jeha.ready:
            raise glx_exceptions.MessageException(
                "Export not available or not yet ready.")

        return jeha
Example #11
def _get_target_history(trans, workflow, payload, param_keys=None, index=0):
    param_keys = param_keys or []
    history_name = payload.get('new_history_name', None)
    history_id = payload.get('history_id', None)
    history_param = payload.get('history', None)
    if [history_name, history_id, history_param].count(None) < 2:
        raise exceptions.RequestParameterInvalidException(
            "Specified workflow target history multiple ways - at most one of 'history', 'history_id', and 'new_history_name' may be specified."
        )
    if history_param:
        if history_param.startswith('hist_id='):
            history_id = history_param[8:]
        else:
            history_name = history_param
    if history_id:
        history_manager = histories.HistoryManager(trans.app)
        target_history = history_manager.get_owned(
            trans.security.decode_id(history_id),
            trans.user,
            current_history=trans.history)
    else:
        if history_name:
            nh_name = history_name
        else:
            nh_name = 'History from %s workflow' % workflow.name
        if len(param_keys) <= index:
            raise exceptions.MessageException(
                "Incorrect expansion of workflow batch parameters.")
        ids = param_keys[index]
        nids = len(ids)
        if nids == 1:
            nh_name = '{} on {}'.format(nh_name, ids[0])
        elif nids > 1:
            nh_name = '{} on {} and {}'.format(nh_name, ', '.join(ids[0:-1]),
                                               ids[-1])
        new_history = trans.app.model.History(user=trans.user, name=nh_name)
        trans.sa_session.add(new_history)
        target_history = new_history
    return target_history
Example #12
    def _get_users(
            self,
            trans,
            emails_or_ids: Optional[List] = None
    ) -> Tuple[Set[User], Set[str]]:
        if emails_or_ids is None:
            raise exceptions.MessageException(
                "Missing required user IDs or emails")
        send_to_users: Set[User] = set()
        send_to_err: Set[str] = set()
        for email_or_id in set(emails_or_ids):
            email_or_id = email_or_id.strip()
            if not email_or_id:
                continue

            send_to_user = None
            if '@' in email_or_id:
                email_address = email_or_id
                send_to_user = self.manager.user_manager.by_email(
                    email_address, filters=[User.table.c.deleted == false()])
            else:
                try:
                    decoded_user_id = trans.security.decode_id(email_or_id)
                    send_to_user = self.manager.user_manager.by_id(
                        decoded_user_id)
                    if send_to_user.deleted:
                        send_to_user = None
                except exceptions.MalformedId:
                    send_to_user = None

            if not send_to_user:
                send_to_err.add(f"{email_or_id} is not a valid Galaxy user.")
            elif send_to_user == trans.user:
                send_to_err.add("You cannot share resources with yourself.")
            else:
                send_to_users.add(send_to_user)

        return send_to_users, send_to_err
Example #13
    def __connect_workflow_steps( self, steps, steps_by_external_id ):
        """ Second pass to deal with connections between steps.

        Create workflow connection objects using externally specified ids
        used during creation or update.
        """
        for step in steps:
            # Input connections
            for input_name, conn_list in step.temp_input_connections.iteritems():
                if not conn_list:
                    continue
                if not isinstance(conn_list, list):  # Older style singleton connection
                    conn_list = [conn_list]
                for conn_dict in conn_list:
                    if 'output_name' not in conn_dict or 'id' not in conn_dict:
                        template = "Invalid connection [%s] - must be dict with output_name and id fields."
                        message = template % conn_dict
                        raise exceptions.MessageException(message)
                    conn = model.WorkflowStepConnection()
                    conn.input_step = step
                    conn.input_name = input_name
                    conn.output_name = conn_dict['output_name']
                    conn.output_step = steps_by_external_id[ conn_dict['id'] ]
            del step.temp_input_connections
Example #14
    def archive_download( self, trans, id, jeha_id, **kwds ):
        """
        export_download( self, trans, id, jeha_id )
        * GET /api/histories/{id}/exports/{jeha_id}:
            If ready and available, return raw contents of exported history.
            Use/poll "PUT /api/histories/{id}/exports" to initiate the creation
            of such an export - when ready that route will return 200 status
            code (instead of 202) with a JSON dictionary containing a
            `download_url`.
        """
        # Seems silly to put jeha_id in here, but we want GET to be immutable,
        # and this is how that is being accomplished.
        history = self.history_manager.get_accessible( self.decode_id( id ), trans.user, current_history=trans.history )
        matching_exports = [e for e in history.exports if trans.security.encode_id( e.id ) == jeha_id]
        if not matching_exports:
            raise exceptions.ObjectNotFound()

        jeha = matching_exports[ 0 ]
        if not jeha.ready:
            # User should not have been given this URL, PUT export should have
            # return a 202.
            raise exceptions.MessageException( "Export not available or not yet ready." )

        return self.serve_ready_history_export( trans, jeha )
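
The docstring above describes the export protocol: PUT /api/histories/{id}/exports returns 202 while the archive is being prepared and 200 with a `download_url` once it is ready, after which this GET route serves the raw archive. A hedged polling sketch is shown below; the Galaxy URL, API key, poll interval, and the assumption that `download_url` is a path relative to the Galaxy base URL are all illustrative.

import time

import requests

# Placeholder values - substitute a real Galaxy instance and API key.
GALAXY_URL = "https://galaxy.example.org"
API_KEY = "your-api-key"


def download_history_export(encoded_history_id, out_path, poll_seconds=10):
    """Trigger a history export and download the archive once it is ready."""
    export_url = f"{GALAXY_URL}/api/histories/{encoded_history_id}/exports"
    while True:
        response = requests.put(export_url, params={"key": API_KEY})
        response.raise_for_status()
        if response.status_code == 200:
            # Ready: the JSON body carries a download_url for the archive.
            download_url = response.json()["download_url"]
            break
        time.sleep(poll_seconds)  # 202: the export is still being prepared.
    archive = requests.get(f"{GALAXY_URL}{download_url}", params={"key": API_KEY})
    archive.raise_for_status()
    with open(out_path, "wb") as fh:
        fh.write(archive.content)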
Example #15
    def create(self, trans, payload, **kwd):
        """
        POST /api/workflows

        Run or create workflows from the api.

        If installed_repository_file or from_history_id is specified a new
        workflow will be created for this user. Otherwise, workflow_id must be
        specified and this API method will cause a workflow to execute.

        :param  installed_repository_file    The path of a workflow to import. Either workflow_id, installed_repository_file or from_history_id must be specified
        :type   installed_repository_file    str

        :param  workflow_id:                 An existing workflow id. Either workflow_id, installed_repository_file or from_history_id must be specified
        :type   workflow_id:                 str

        :param  parameters:                  If workflow_id is set - see _update_step_parameters()
        :type   parameters:                  dict

        :param  ds_map:                      If workflow_id is set - a dictionary mapping each input step id to a dictionary with 2 keys: 'src' (which can be 'ldda', 'ld' or 'hda') and 'id' (which should be the id of a LibraryDatasetDatasetAssociation, LibraryDataset or HistoryDatasetAssociation respectively)
        :type   ds_map:                      dict

        :param  no_add_to_history:           If workflow_id is set - if present in the payload with any value, the input datasets will not be added to the selected history
        :type   no_add_to_history:           str

        :param  history:                     If workflow_id is set - optional history where to run the workflow, either the name of a new history or "hist_id=HIST_ID" where HIST_ID is the id of an existing history. If not specified, the workflow will be run in a new unnamed history
        :type   history:                     str

        :param  replacement_params:          If workflow_id is set - an optional dictionary used when renaming datasets
        :type   replacement_params:          dict

        :param  from_history_id:             Id of history to extract a workflow from. Either workflow_id, installed_repository_file or from_history_id must be specified
        :type   from_history_id:             str

        :param  job_ids:                     If from_history_id is set - optional list of jobs to include when extracting a workflow from history
        :type   job_ids:                     str

        :param  dataset_ids:                 If from_history_id is set - optional list of HDA `hid`s corresponding to workflow inputs when extracting a workflow from history
        :type   dataset_ids:                 str

        :param  dataset_collection_ids:      If from_history_id is set - optional list of HDCA `hid`s corresponding to workflow inputs when extracting a workflow from history
        :type   dataset_collection_ids:      str

        :param  workflow_name:               If from_history_id is set - name of the workflow to create when extracting a workflow from history
        :type   workflow_name:               str

        :param  allow_tool_state_corrections:  If set to True, any Tool parameter changes will not prevent running the workflow, defaults to False
        :type   allow_tool_state_corrections:  bool

        :param use_cached_job:               If set to True galaxy will attempt to find previously executed steps for all workflow steps with the exact same parameter combinations
                                             and will copy the outputs of the previously executed step.
        """
        ways_to_create = set([
            'archive_source',
            'workflow_id',
            'installed_repository_file',
            'from_history_id',
            'from_path',
            'shared_workflow_id',
            'workflow',
        ])

        if len(ways_to_create.intersection(payload)) == 0:
            message = "One parameter among - %s - must be specified" % ", ".join(
                ways_to_create)
            raise exceptions.RequestParameterMissingException(message)

        if len(ways_to_create.intersection(payload)) > 1:
            message = "Only one parameter among - %s - must be specified" % ", ".join(
                ways_to_create)
            raise exceptions.RequestParameterInvalidException(message)

        if 'installed_repository_file' in payload:
            if not trans.user_is_admin:
                raise exceptions.AdminRequiredException()
            installed_repository_file = payload.get(
                'installed_repository_file', '')
            if not os.path.exists(installed_repository_file):
                raise exceptions.MessageException(
                    "Repository file '%s' not found.")
            elif os.path.getsize(
                    os.path.abspath(installed_repository_file)) > 0:
                workflow_data = None
                with open(installed_repository_file, 'rb') as f:
                    workflow_data = f.read()
                return self.__api_import_from_archive(trans, workflow_data)
            else:
                raise exceptions.MessageException(
                    "You attempted to open an empty file.")

        if 'archive_source' in payload:
            archive_source = payload['archive_source']
            archive_file = payload.get('archive_file')
            archive_data = None
            if archive_source:
                if archive_source.startswith("file://"):
                    if not trans.user_is_admin:
                        raise exceptions.AdminRequiredException()
                    workflow_src = {
                        "src": "from_path",
                        "path": archive_source[len("file://"):]
                    }
                    payload["workflow"] = workflow_src
                    return self.__api_import_new_workflow(
                        trans, payload, **kwd)
                else:
                    try:
                        archive_data = requests.get(archive_source).text
                    except Exception:
                        raise exceptions.MessageException(
                            "Failed to open URL '%s'." %
                            escape(archive_source))
            elif hasattr(archive_file, 'file'):
                uploaded_file = archive_file.file
                uploaded_file_name = uploaded_file.name
                if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
                    archive_data = uploaded_file.read()
                else:
                    raise exceptions.MessageException(
                        "You attempted to upload an empty file.")
            else:
                raise exceptions.MessageException(
                    "Please provide a URL or file.")
            return self.__api_import_from_archive(trans, archive_data,
                                                  "uploaded file")

        if 'from_history_id' in payload:
            from_history_id = payload.get('from_history_id')
            from_history_id = self.decode_id(from_history_id)
            history = self.history_manager.get_accessible(
                from_history_id, trans.user, current_history=trans.history)

            job_ids = [self.decode_id(_) for _ in payload.get('job_ids', [])]
            dataset_ids = payload.get('dataset_ids', [])
            dataset_collection_ids = payload.get('dataset_collection_ids', [])
            workflow_name = payload['workflow_name']
            stored_workflow = extract_workflow(
                trans=trans,
                user=trans.get_user(),
                history=history,
                job_ids=job_ids,
                dataset_ids=dataset_ids,
                dataset_collection_ids=dataset_collection_ids,
                workflow_name=workflow_name,
            )
            item = stored_workflow.to_dict(
                value_mapper={'id': trans.security.encode_id})
            item['url'] = url_for('workflow', id=item['id'])
            return item

        if 'from_path' in payload:
            from_path = payload.get('from_path')
            payload["workflow"] = {"src": "from_path", "path": from_path}
            return self.__api_import_new_workflow(trans, payload, **kwd)

        if 'shared_workflow_id' in payload:
            workflow_id = payload['shared_workflow_id']
            return self.__api_import_shared_workflow(trans, workflow_id,
                                                     payload)

        if 'workflow' in payload:
            return self.__api_import_new_workflow(trans, payload, **kwd)

        workflow_id = payload.get('workflow_id', None)
        if not workflow_id:
            message = "Invalid workflow_id specified."
            raise exceptions.RequestParameterInvalidException(message)

        # Get workflow + accessibility check.
        stored_workflow = self.__get_stored_accessible_workflow(
            trans, workflow_id)
        workflow = stored_workflow.latest_workflow

        run_configs = build_workflow_run_configs(trans, workflow, payload)
        assert len(run_configs) == 1
        run_config = run_configs[0]
        history = run_config.target_history

        # invoke may throw MessageExceptions on tool errors, failure
        # to match up inputs, etc...
        outputs, invocation = invoke(
            trans=trans,
            workflow=workflow,
            workflow_run_config=run_config,
            populate_state=True,
        )
        trans.sa_session.flush()

        # Build legacy output - should probably include more information from
        # outputs.
        rval = {}
        rval['history'] = trans.security.encode_id(history.id)
        rval['outputs'] = []
        if outputs:
            # Newer outputs don't necessarily fill outputs (?)
            for step in workflow.steps:
                if step.type == 'tool' or step.type is None:
                    for v in outputs[step.id].values():
                        rval['outputs'].append(trans.security.encode_id(v.id))

        # Newer version of this API just returns the invocation as a dict, to
        # facilitate migration - produce the newer style response and blend in
        # the older information.
        invocation_response = self.__encode_invocation(invocation, **kwd)
        invocation_response.update(rval)
        return invocation_response
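
The docstring above describes, among other things, the ds_map format used when running an existing workflow: each input step id maps to a dict with 'src' ('ldda', 'ld' or 'hda') and 'id'. A hedged sketch of such a payload for POST /api/workflows follows; the encoded ids and the history name are made-up placeholders.

# Illustrative payload for POST /api/workflows (run an existing workflow).
# The encoded workflow/dataset ids and the history name are placeholders.
payload = {
    "workflow_id": "f2db41e1fa331b3e",
    "history": "Workflow run on sample data",  # or "hist_id=<encoded history id>"
    "ds_map": {
        # input step id -> {'src': 'hda' | 'ldda' | 'ld', 'id': <encoded id>}
        "0": {"src": "hda", "id": "2891970512fa2d5a"},
        "1": {"src": "ld", "id": "3f5830403180d620"},
    },
    "allow_tool_state_corrections": False,
}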
Example #16
    def _create(self, trans, payload, **kwd):
        # HACK: for now, if action is rerun, rerun tool.
        action = payload.get('action', None)
        if action == 'rerun':
            return self._rerun_tool(trans, payload, **kwd)

        # -- Execute tool. --

        # Get tool.
        tool_version = payload.get('tool_version', None)
        tool = trans.app.toolbox.get_tool(
            payload['tool_id'], tool_version) if 'tool_id' in payload else None
        if not tool or not tool.allow_user_access(trans.user):
            raise exceptions.MessageException(
                'Tool not found or not accessible.')
        if trans.app.config.user_activation_on:
            if not trans.user:
                log.warning(
                    "Anonymous user attempts to execute tool, but account activation is turned on."
                )
            elif not trans.user.active:
                log.warning(
                    "User \"%s\" attempts to execute tool, but account activation is turned on and user account is not active."
                    % trans.user.email)

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get('history_id', None)
        if history_id:
            decoded_id = self.decode_id(history_id)
            target_history = self.history_manager.get_owned(
                decoded_id, trans.user, current_history=trans.history)
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get('inputs', {})
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.items():
            if k.startswith('files_') or k.startswith('__files_'):
                inputs[k] = v

        # for inputs that are coming from the Library, copy them into the history
        input_patch = {}
        for k, v in inputs.items():
            if isinstance(v, dict) and v.get('src',
                                             '') == 'ldda' and 'id' in v:
                ldda = trans.sa_session.query(
                    trans.app.model.LibraryDatasetDatasetAssociation).get(
                        self.decode_id(v['id']))
                if trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                        trans.get_current_user_roles(), ldda.dataset):
                    input_patch[k] = ldda.to_history_dataset_association(
                        target_history, add_to_history=True)

        for k, v in input_patch.items():
            inputs[k] = v

        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params(inputs, sanitize=False)
        incoming = params.__dict__

        # use_cached_job can be passed in via the top-level payload or among the tool inputs.
        # I think it should be a top-level parameter, but because the selector is implemented
        # as a regular tool parameter we accept both.
        use_cached_job = payload.get('use_cached_job',
                                     False) or util.string_as_bool(
                                         inputs.get('use_cached_job', 'false'))
        vars = tool.handle_input(trans,
                                 incoming,
                                 history=target_history,
                                 use_cached_job=use_cached_job)

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get('out_data', [])
        rval = {
            'outputs': [],
            'output_collections': [],
            'jobs': [],
            'implicit_collections': []
        }

        job_errors = vars.get('job_errors', [])
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval['errors'] = job_errors

        outputs = rval['outputs']
        # TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            # add the output name back into the output data structure
            # so it's possible to figure out which newly created elements
            # correspond with which tool file outputs
            output_dict['output_name'] = output_name
            outputs.append(
                trans.security.encode_dict_ids(output_dict,
                                               skip_startswith="metadata_"))

        for job in vars.get('jobs', []):
            rval['jobs'].append(
                self.encode_all_ids(trans,
                                    job.to_dict(view='collection'),
                                    recursive=True))

        for output_name, collection_instance in vars.get(
                'output_collections', []):
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(
                collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['output_collections'].append(output_dict)

        for output_name, collection_instance in vars.get(
                'implicit_collections', {}).items():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(
                collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['implicit_collections'].append(output_dict)

        return rval
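
The handler above pulls 'tool_id', 'tool_version', 'history_id', 'inputs', and an optional 'use_cached_job' flag from the payload and merges any 'files_'/'__files_' entries into the inputs before calling tool.handle_input(). A hedged sketch of such a payload for the tool execution API is shown below; the tool id, encoded ids, and parameter names are placeholders for illustration.

# Illustrative payload for the tool execution API handled above.
# Tool id, encoded history/dataset ids, and parameter names are placeholders.
payload = {
    "tool_id": "cat1",
    "history_id": "2891970512fa2d5a",
    "inputs": {
        # Data parameters reference existing datasets by encoded id.
        "input1": {"src": "hda", "id": "bbd44e69cb8906b5"},
        "queries_0|input2": {"src": "hda", "id": "3f5830403180d620"},
    },
    "use_cached_job": False,
}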
Example #17
    def create(
        self,
        trans: ProvidesHistoryContext,
        payload: CreateHistoryPayload,
        serialization_params: SerializationParams,
    ):
        """Create a new history from scratch, by copying an existing one or by importing
        from URL or File depending on the provided parameters in the payload.
        """
        if trans.anonymous:
            raise glx_exceptions.AuthenticationRequired(
                "You need to be logged in to create histories.")
        if trans.user and trans.user.bootstrap_admin_user:
            raise glx_exceptions.RealUserRequiredException(
                "Only real users can create histories.")
        hist_name = None
        if payload.name is not None:
            hist_name = restore_text(payload.name)
        copy_this_history_id = payload.history_id
        all_datasets = payload.all_datasets

        if payload.archive_source is not None or hasattr(
                payload.archive_file, "file"):
            archive_source = payload.archive_source
            archive_file = payload.archive_file
            if archive_source:
                archive_type = payload.archive_type
            elif archive_file is not None and hasattr(archive_file, "file"):
                archive_source = archive_file.file.name
                archive_type = HistoryImportArchiveSourceType.file
                if isinstance(archive_file.file, SpooledTemporaryFile):
                    archive_source = self._save_upload_file_tmp(archive_file)
            else:
                raise glx_exceptions.MessageException(
                    "Please provide a url or file.")
            job = self.manager.queue_history_import(
                trans,
                archive_type=archive_type,
                archive_source=archive_source)
            job_dict = job.to_dict()
            job_dict[
                "message"] = f"Importing history from source '{archive_source}'. This history will be visible when the import is complete."
            job_dict = trans.security.encode_all_ids(job_dict)
            return JobImportHistoryResponse.parse_obj(job_dict)

        new_history = None
        # if a history id was passed, copy that history
        if copy_this_history_id:
            decoded_id = self.decode_id(copy_this_history_id)
            original_history = self.manager.get_accessible(
                decoded_id, trans.user, current_history=trans.history)
            hist_name = hist_name or (f"Copy of '{original_history.name}'")
            new_history = original_history.copy(name=hist_name,
                                                target_user=trans.user,
                                                all_datasets=all_datasets)

        # otherwise, create a new empty history
        else:
            new_history = self.manager.create(user=trans.user, name=hist_name)

        trans.app.security_agent.history_set_default_permissions(new_history)
        trans.sa_session.add(new_history)
        trans.sa_session.flush()

        # an anonymous user can only have one history
        if self.user_manager.is_anonymous(trans.user):
            self.manager.set_current(trans, new_history)

        return self._serialize_history(trans, new_history,
                                       serialization_params)
Example #18
def build_workflow_run_configs( trans, workflow, payload ):
    app = trans.app
    allow_tool_state_corrections = payload.get( 'allow_tool_state_corrections', False )

    # Sanity checks.
    if len( workflow.steps ) == 0:
        raise exceptions.MessageException( "Workflow cannot be run because it does not have any steps" )
    if workflow.has_cycles:
        raise exceptions.MessageException( "Workflow cannot be run because it contains cycles" )

    if 'step_parameters' in payload and 'parameters' in payload:
        raise exceptions.RequestParameterInvalidException( "Cannot specify both legacy parameters and step_parameters attributes." )
    if 'inputs' in payload and 'ds_map' in payload:
        raise exceptions.RequestParameterInvalidException( "Cannot specify both legacy ds_map and input attributes." )

    add_to_history = 'no_add_to_history' not in payload
    legacy = payload.get( 'legacy', False )
    already_normalized = payload.get( 'parameters_normalized', False )
    raw_parameters = payload.get( 'parameters', {} )

    run_configs = []
    unexpanded_param_map = _normalize_step_parameters( workflow.steps, raw_parameters, legacy=legacy, already_normalized=already_normalized )
    expanded_params, expanded_param_keys = expand_workflow_inputs( unexpanded_param_map )
    for index, param_map in enumerate( expanded_params ):
        history = _get_target_history(trans, workflow, payload, expanded_param_keys, index)
        inputs = payload.get( 'inputs', None )
        inputs_by = payload.get( 'inputs_by', None )
        # New default is to reference steps by index of workflow step
        # which is intrinsic to the workflow and independent of the state
        # of Galaxy at the time of workflow import.
        default_inputs_by = 'step_index|step_uuid'
        if inputs is None:
            # Default to legacy behavior - read ds_map and reference steps
            # by unencoded step id (a raw database id).
            inputs = payload.get( 'ds_map', {} )
            if legacy:
                default_inputs_by = 'step_id|step_uuid'
            inputs_by = inputs_by or default_inputs_by
        else:
            inputs = inputs or {}
        inputs_by = inputs_by or default_inputs_by
        if inputs or not already_normalized:
            normalized_inputs = _normalize_inputs( workflow.steps, inputs, inputs_by )
        else:
            # Only allow dumping IDs directly into JSON database instead of properly recording the
            # inputs with referential integrity if parameters are already normalized (coming from tool form).
            normalized_inputs = {}

        steps_by_id = workflow.steps_by_id
        # Set workflow inputs.
        for key, input_dict in normalized_inputs.iteritems():
            step = steps_by_id[key]
            if step.type == 'parameter_input':
                continue
            if 'src' not in input_dict:
                raise exceptions.RequestParameterInvalidException( "Not input source type defined for input '%s'." % input_dict )
            if 'id' not in input_dict:
                raise exceptions.RequestParameterInvalidException( "Not input id defined for input '%s'." % input_dict )
            if 'content' in input_dict:
                raise exceptions.RequestParameterInvalidException( "Input cannot specify explicit 'content' attribute %s'." % input_dict )
            input_source = input_dict[ 'src' ]
            input_id = input_dict[ 'id' ]
            try:
                if input_source == 'ldda':
                    ldda = trans.sa_session.query( app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id( input_id ) )
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset )
                    content = ldda.to_history_dataset_association( history, add_to_history=add_to_history )
                elif input_source == 'ld':
                    ldda = trans.sa_session.query( app.model.LibraryDataset ).get( trans.security.decode_id( input_id ) ).library_dataset_dataset_association
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset )
                    content = ldda.to_history_dataset_association( history, add_to_history=add_to_history )
                elif input_source == 'hda':
                    # Get dataset handle, add to dict and history if necessary
                    content = trans.sa_session.query( app.model.HistoryDatasetAssociation ).get( trans.security.decode_id( input_id ) )
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), content.dataset )
                elif input_source == 'uuid':
                    dataset = trans.sa_session.query( app.model.Dataset ).filter( app.model.Dataset.uuid == input_id ).first()
                    if dataset is None:
                        # this will need to be changed later. If federation code is available, then a missing UUID
                        # could be found among federation partners
                        raise exceptions.RequestParameterInvalidException( "Input cannot find UUID: %s." % input_id )
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), dataset )
                    content = history.add_dataset( dataset )
                elif input_source == 'hdca':
                    content = app.dataset_collections_service.get_dataset_collection_instance( trans, 'history', input_id )
                else:
                    raise exceptions.RequestParameterInvalidException( "Unknown workflow input source '%s' specified." % input_source )
                if add_to_history and content.history != history:
                    content = content.copy()
                    if isinstance( content, app.model.HistoryDatasetAssociation ):
                        history.add_dataset( content )
                    else:
                        history.add_dataset_collection( content )
                input_dict[ 'content' ] = content
            except AssertionError:
                raise exceptions.ItemAccessibilityException( "Invalid workflow input '%s' specified" % input_id )
        for key in set( normalized_inputs.keys() ):
            value = normalized_inputs[ key ]
            if isinstance( value, dict ) and 'content' in value:
                normalized_inputs[ key ] = value[ 'content' ]
            else:
                normalized_inputs[ key ] = value
        run_configs.append(WorkflowRunConfig(
            target_history=history,
            replacement_dict=payload.get( 'replacement_params', {} ),
            inputs=normalized_inputs,
            param_map=param_map,
            allow_tool_state_corrections=allow_tool_state_corrections
        ))

    return run_configs
Example #19
File: users.py  Project: msauria/galaxy
    def purge(self, user, flush=True):
        """Purge the given user. They must have the deleted flag already."""
        if not self.app.config.allow_user_deletion:
            raise exceptions.ConfigDoesNotAllowException(
                'The configuration of this Galaxy instance does not allow admins to delete or purge users.'
            )
        if not user.deleted:
            raise exceptions.MessageException(
                'User \'%s\' has not been deleted, so they cannot be purged.' %
                user.email)
        private_role = self.app.security_agent.get_private_user_role(user)
        # Delete History
        for active_history in user.active_histories:
            self.session().refresh(active_history)
            for hda in active_history.active_datasets:
                # Delete HistoryDatasetAssociation
                hda.deleted = True
                self.session().add(hda)
            active_history.deleted = True
            self.session().add(active_history)
        # Delete UserGroupAssociations
        for uga in user.groups:
            self.session().delete(uga)
        # Delete UserRoleAssociations EXCEPT FOR THE PRIVATE ROLE
        for ura in user.roles:
            if ura.role_id != private_role.id:
                self.session().delete(ura)
        # Delete UserAddresses
        for address in user.addresses:
            self.session().delete(address)
        compliance_log = logging.getLogger('COMPLIANCE')
        compliance_log.info(f'delete-user-event: {user.username}')
        # Maybe there is some case in the future where an admin needs
        # to prove that a user was using a server for some reason (e.g.
        # a court case.) So we make this painfully hard to recover (and
        # not immediately reversible) in line with GDPR, but still
        # leave open the possibility to prove someone was part of the
        # server just in case. By knowing the exact email + approximate
        # time of deletion, one could run through hashes for every
        # second of the surrounding days/weeks.
        pseudorandom_value = str(int(time.time()))
        # Replace email + username with a (theoretically) irreversible
        # hash. If provided with the username we can probably re-hash
        # to identify if it is needed for some reason.
        #
        # Deleting multiple times will re-hash the username/email
        email_hash = new_secure_hash(user.email + pseudorandom_value)
        uname_hash = new_secure_hash(user.username + pseudorandom_value)
        # We must also redact username
        for role in user.all_roles():
            if self.app.config.redact_username_during_deletion:
                role.name = role.name.replace(user.username, uname_hash)
                role.description = role.description.replace(
                    user.username, uname_hash)

            if self.app.config.redact_email_during_deletion:
                role.name = role.name.replace(user.email, email_hash)
                role.description = role.description.replace(
                    user.email, email_hash)
            user.email = email_hash
            user.username = uname_hash
        # Redact user addresses as well
        if self.app.config.redact_user_address_during_deletion:
            user_addresses = self.session().query(self.app.model.UserAddress) \
                .filter(self.app.model.UserAddress.user_id == user.id).all()
            for addr in user_addresses:
                addr.desc = new_secure_hash(addr.desc + pseudorandom_value)
                addr.name = new_secure_hash(addr.name + pseudorandom_value)
                addr.institution = new_secure_hash(addr.institution +
                                                   pseudorandom_value)
                addr.address = new_secure_hash(addr.address +
                                               pseudorandom_value)
                addr.city = new_secure_hash(addr.city + pseudorandom_value)
                addr.state = new_secure_hash(addr.state + pseudorandom_value)
                addr.postal_code = new_secure_hash(addr.postal_code +
                                                   pseudorandom_value)
                addr.country = new_secure_hash(addr.country +
                                               pseudorandom_value)
                addr.phone = new_secure_hash(addr.phone + pseudorandom_value)
                self.session().add(addr)
        # Purge the user
        super().purge(user, flush=flush)
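
The comments above argue that, because the redaction hash is built from the email (or username) plus an integer timestamp, someone who knows the exact email and the approximate deletion time could confirm past membership by hashing every second in the surrounding window. A small sketch of that check follows, assuming for illustration that new_secure_hash is a plain SHA-1 hex digest of its string argument; the real Galaxy helper may differ.

import hashlib


def sha1_hex(text):
    """Stand-in for new_secure_hash; assumed to be a SHA-1 hex digest here."""
    return hashlib.sha1(text.encode("utf-8")).hexdigest()


def confirm_redacted_email(known_email, redacted_hash, approx_deletion_time,
                           window_seconds=7 * 24 * 3600):
    """Return the deletion timestamp if redacted_hash matches known_email, else None."""
    center = int(approx_deletion_time)
    for ts in range(center - window_seconds, center + window_seconds + 1):
        if sha1_hex(known_email + str(ts)) == redacted_hash:
            return ts  # membership confirmed at this deletion second
    return None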
Example #20
def build_workflow_run_config(trans, workflow, payload):
    app = trans.app
    history_manager = histories.HistoryManager(app)

    # Pull other parameters out of payload.
    param_map = payload.get('parameters', {})
    param_map = normalize_step_parameters(workflow.steps, param_map)
    inputs = payload.get('inputs', None)
    inputs_by = payload.get('inputs_by', None)
    if inputs is None:
        # Default to legacy behavior - read ds_map and reference steps
        # by unencoded step id (a raw database id).
        inputs = payload.get('ds_map', {})
        inputs_by = inputs_by or 'step_id|step_uuid'
    else:
        inputs = inputs or {}
        # New default is to reference steps by index of workflow step
        # which is intrinsic to the workflow and independent of the state
        # of Galaxy at the time of workflow import.
        inputs_by = inputs_by or 'step_index|step_uuid'

    add_to_history = 'no_add_to_history' not in payload
    history_param = payload.get('history', '')
    allow_tool_state_corrections = payload.get('allow_tool_state_corrections',
                                               False)

    # Sanity checks.
    if len(workflow.steps) == 0:
        raise exceptions.MessageException(
            "Workflow cannot be run because it does not have any steps")
    if workflow.has_cycles:
        raise exceptions.MessageException(
            "Workflow cannot be run because it contains cycles")
    if workflow.has_errors:
        message = "Workflow cannot be run because of validation errors in some steps"
        raise exceptions.MessageException(message)

    # Get target history.
    if history_param.startswith('hist_id='):
        # Passing an existing history to use.
        encoded_history_id = history_param[8:]
        history_id = __decode_id(trans,
                                 encoded_history_id,
                                 model_type="history")
        history = history_manager.get_owned(history_id,
                                            trans.user,
                                            current_history=trans.history)
    else:
        # Send workflow outputs to new history.
        history = app.model.History(name=history_param, user=trans.user)
        trans.sa_session.add(history)
        trans.sa_session.flush()

    # Set workflow inputs.
    for input_dict in inputs.itervalues():
        if 'src' not in input_dict:
            message = "Not input source type defined for input '%s'." % input_dict
            raise exceptions.RequestParameterInvalidException(message)
        if 'id' not in input_dict:
            message = "Not input id defined for input '%s'." % input_dict
            raise exceptions.RequestParameterInvalidException(message)
        if 'content' in input_dict:
            message = "Input cannot specify explicit 'content' attribute %s'." % input_dict
            raise exceptions.RequestParameterInvalidException(message)
        input_source = input_dict['src']
        input_id = input_dict['id']
        try:
            if input_source == 'ldda':
                ldda = trans.sa_session.query(
                    app.model.LibraryDatasetDatasetAssociation).get(
                        trans.security.decode_id(input_id))
                assert trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), ldda.dataset)
                content = ldda.to_history_dataset_association(
                    history, add_to_history=add_to_history)
            elif input_source == 'ld':
                ldda = trans.sa_session.query(app.model.LibraryDataset).get(
                    trans.security.decode_id(
                        input_id)).library_dataset_dataset_association
                assert trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), ldda.dataset)
                content = ldda.to_history_dataset_association(
                    history, add_to_history=add_to_history)
            elif input_source == 'hda':
                # Get dataset handle, add to dict and history if necessary
                content = trans.sa_session.query(
                    app.model.HistoryDatasetAssociation).get(
                        trans.security.decode_id(input_id))
                assert trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), content.dataset)
            elif input_source == 'uuid':
                dataset = trans.sa_session.query(app.model.Dataset).filter(
                    app.model.Dataset.uuid == input_id).first()
                if dataset is None:
                    # this will need to be changed later. If federation code is available, then a missing UUID
                    # could be found among federation partners
                    message = "Input cannot find UUID: %s." % input_id
                    raise exceptions.RequestParameterInvalidException(message)
                assert trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), dataset)
                content = history.add_dataset(dataset)
            elif input_source == 'hdca':
                content = app.dataset_collections_service.get_dataset_collection_instance(
                    trans, 'history', input_id)
            else:
                message = "Unknown workflow input source '%s' specified." % input_source
                raise exceptions.RequestParameterInvalidException(message)
            if add_to_history and content.history != history:
                content = content.copy()
                if isinstance(content, app.model.HistoryDatasetAssociation):
                    history.add_dataset(content)
                else:
                    history.add_dataset_collection(content)
            input_dict['content'] = content
        except AssertionError:
            message = "Invalid workflow input '%s' specified" % input_id
            raise exceptions.ItemAccessibilityException(message)

    normalized_inputs = normalize_inputs(workflow.steps, inputs, inputs_by)

    # Run each step, connecting outputs to inputs
    replacement_dict = payload.get('replacement_params', {})

    run_config = WorkflowRunConfig(
        target_history=history,
        replacement_dict=replacement_dict,
        inputs=normalized_inputs,
        param_map=param_map,
        allow_tool_state_corrections=allow_tool_state_corrections)
    return run_config
Example #21
def build_workflow_run_configs(trans, workflow, payload):
    app = trans.app
    allow_tool_state_corrections = payload.get('allow_tool_state_corrections', False)
    use_cached_job = payload.get('use_cached_job', False)

    # Sanity checks.
    if len(workflow.steps) == 0:
        raise exceptions.MessageException("Workflow cannot be run because it does not have any steps")
    if workflow.has_cycles:
        raise exceptions.MessageException("Workflow cannot be run because it contains cycles")

    if 'step_parameters' in payload and 'parameters' in payload:
        raise exceptions.RequestParameterInvalidException("Cannot specify both legacy parameters and step_parameters attributes.")
    if 'inputs' in payload and 'ds_map' in payload:
        raise exceptions.RequestParameterInvalidException("Cannot specify both legacy ds_map and input attributes.")

    add_to_history = 'no_add_to_history' not in payload
    legacy = payload.get('legacy', False)
    already_normalized = payload.get('parameters_normalized', False)
    raw_parameters = payload.get('parameters', {})

    run_configs = []
    unexpanded_param_map = _normalize_step_parameters(workflow.steps, raw_parameters, legacy=legacy, already_normalized=already_normalized)
    expanded_params, expanded_param_keys = expand_workflow_inputs(unexpanded_param_map)
    for index, param_map in enumerate(expanded_params):
        history = _get_target_history(trans, workflow, payload, expanded_param_keys, index)
        inputs = payload.get('inputs', None)
        inputs_by = payload.get('inputs_by', None)
        # New default is to reference steps by index of workflow step
        # which is intrinsic to the workflow and independent of the state
        # of Galaxy at the time of workflow import.
        default_inputs_by = 'step_index|step_uuid'
        if inputs is None:
            # Default to legacy behavior - read ds_map and reference steps
            # by unencoded step id (a raw database id).
            inputs = payload.get('ds_map', {})
            if legacy:
                default_inputs_by = 'step_id|step_uuid'
            inputs_by = inputs_by or default_inputs_by
        else:
            inputs = inputs or {}
        inputs_by = inputs_by or default_inputs_by
        if inputs or not already_normalized:
            normalized_inputs = _normalize_inputs(workflow.steps, inputs, inputs_by)
        else:
            # Only allow dumping IDs directly into JSON database instead of properly recording the
            # inputs with referential integrity if parameters are already normalized (coming from tool form).
            normalized_inputs = {}

        steps_by_id = workflow.steps_by_id
        # Set workflow inputs.
        for key, input_dict in normalized_inputs.items():
            step = steps_by_id[key]
            if step.type == 'parameter_input':
                continue
            if 'src' not in input_dict:
                raise exceptions.RequestParameterInvalidException("No input source type defined for input '%s'." % input_dict)
            if 'id' not in input_dict:
                raise exceptions.RequestParameterInvalidException("No input id defined for input '%s'." % input_dict)
            if 'content' in input_dict:
                raise exceptions.RequestParameterInvalidException("Input cannot specify explicit 'content' attribute '%s'." % input_dict)
            input_source = input_dict['src']
            input_id = input_dict['id']
            try:
                if input_source == 'ldda':
                    ldda = trans.sa_session.query(app.model.LibraryDatasetDatasetAssociation).get(trans.security.decode_id(input_id))
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), ldda.dataset)
                    content = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
                elif input_source == 'ld':
                    ldda = trans.sa_session.query(app.model.LibraryDataset).get(trans.security.decode_id(input_id)).library_dataset_dataset_association
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), ldda.dataset)
                    content = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
                elif input_source == 'hda':
                    # Get dataset handle, add to dict and history if necessary
                    content = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(trans.security.decode_id(input_id))
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), content.dataset)
                elif input_source == 'uuid':
                    dataset = trans.sa_session.query(app.model.Dataset).filter(app.model.Dataset.uuid == input_id).first()
                    if dataset is None:
                        # this will need to be changed later. If federation code is available, then a missing UUID
                        # could be found among federation partners
                        raise exceptions.RequestParameterInvalidException("Input cannot find UUID: %s." % input_id)
                    assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset(trans.get_current_user_roles(), dataset)
                    content = history.add_dataset(dataset)
                elif input_source == 'hdca':
                    content = app.dataset_collections_service.get_dataset_collection_instance(trans, 'history', input_id)
                else:
                    raise exceptions.RequestParameterInvalidException("Unknown workflow input source '%s' specified." % input_source)
                if add_to_history and content.history != history:
                    content = content.copy()
                    if isinstance(content, app.model.HistoryDatasetAssociation):
                        history.add_dataset(content)
                    else:
                        history.add_dataset_collection(content)
                input_dict['content'] = content
            except AssertionError:
                raise exceptions.ItemAccessibilityException("Invalid workflow input '%s' specified" % input_id)
        for key in set(normalized_inputs.keys()):
            value = normalized_inputs[key]
            if isinstance(value, dict) and 'content' in value:
                normalized_inputs[key] = value['content']
            else:
                normalized_inputs[key] = value
        resource_params = payload.get('resource_params', {})
        if resource_params:
            # quick attempt to validate parameters, just handle select options now since that is what
            # is needed for DTD - arbitrary plugins can define arbitrary logic at runtime in the
            # destination function. In the future this should be extended to allow arbitrary
            # pluggable validation.
            resource_mapper_function = get_resource_mapper_function(trans.app)
            # TODO: Do we need to do anything with the stored_workflow or can this be removed.
            resource_parameters = resource_mapper_function(trans=trans, stored_workflow=None, workflow=workflow)
            for resource_parameter in resource_parameters:
                if resource_parameter.get("type") == "select":
                    name = resource_parameter.get("name")
                    if name in resource_params:
                        value = resource_params[name]
                        valid_option = False
                        # TODO: How should we handle the case where no selection is made by the user?
                        # This can happen when there is a select on the page but the user has no options to select.
                        # Here the validation passes it through. An alternative may be to remove the parameter if
                        # it is None.
                        if value is None:
                            valid_option = True
                        else:
                            for option_elem in resource_parameter.get('data'):
                                option_value = option_elem.get("value")
                                if value == option_value:
                                    valid_option = True
                        if not valid_option:
                            raise exceptions.RequestParameterInvalidException("Invalid value for parameter '%s' found." % name)

        run_configs.append(WorkflowRunConfig(
            target_history=history,
            replacement_dict=payload.get('replacement_params', {}),
            inputs=normalized_inputs,
            param_map=param_map,
            allow_tool_state_corrections=allow_tool_state_corrections,
            use_cached_job=use_cached_job,
            resource_params=resource_params,
        ))

    return run_configs
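
For orientation, a hedged sketch of a full invocation payload that build_workflow_run_configs() could expand into run configs; step keys, ids, and parameter names are placeholders.

import json

payload = {
    "inputs_by": "step_index",
    "inputs": {
        "0": {"src": "hda", "id": "2891970512fa2d5a"},   # placeholder dataset for input step 0
    },
    "parameters": {
        "2": {"some_param": "some_value"},               # placeholder tool parameter override
    },
    "replacement_params": {"run_label": "batch-1"},
    "use_cached_job": False,
    "allow_tool_state_corrections": True,
    "resource_params": {},
}
print(json.dumps(payload, indent=2))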
Example #22
    def update(self, trans, id, payload, **kwds):
        """
        * PUT /api/workflows/{id}
            updates the workflow stored with ``id``

        :type   id:      str
        :param  id:      the encoded id of the workflow to update
        :type   payload: dict
        :param  payload: a dictionary containing any or all the
            * workflow   the json description of the workflow as would be
                         produced by GET workflows/<id>/download or
                         given to `POST workflows`

                         The workflow contents will be updated to target
                         this.

            * name       optional string name for the workflow, if not present in payload,
                         name defaults to existing name
            * annotation optional string annotation for the workflow, if not present in payload,
                         annotation defaults to existing annotation
            * menu_entry optional boolean marking if the workflow should appear in the user's menu,
                         if not present, workflow menu entries are not modified

        :rtype:     dict
        :returns:   serialized version of the workflow
        """
        stored_workflow = self.__get_stored_workflow(trans, id)
        workflow_dict = payload.get('workflow') or payload
        if workflow_dict:
            raw_workflow_description = self.__normalize_workflow(
                trans, workflow_dict)
            workflow_dict = raw_workflow_description.as_dict
            new_workflow_name = workflow_dict.get('name')
            if new_workflow_name and new_workflow_name != stored_workflow.name:
                sanitized_name = sanitize_html(new_workflow_name)
                workflow = stored_workflow.latest_workflow.copy()
                workflow.stored_workflow = stored_workflow
                workflow.name = sanitized_name
                stored_workflow.name = sanitized_name
                stored_workflow.latest_workflow = workflow
                trans.sa_session.add_all([workflow, stored_workflow])
                trans.sa_session.flush()

            if 'annotation' in workflow_dict:
                newAnnotation = sanitize_html(workflow_dict['annotation'])
                self.add_item_annotation(trans.sa_session, trans.get_user(),
                                         stored_workflow, newAnnotation)

            if 'menu_entry' in workflow_dict or 'show_in_tool_panel' in workflow_dict:
                if workflow_dict.get('menu_entry') or workflow_dict.get(
                        'show_in_tool_panel'):
                    menuEntry = model.StoredWorkflowMenuEntry()
                    menuEntry.stored_workflow = stored_workflow
                    trans.get_user().stored_workflow_menu_entries.append(
                        menuEntry)
                else:
                    # remove if in list
                    entries = {
                        x.stored_workflow_id: x
                        for x in trans.get_user().stored_workflow_menu_entries
                    }
                    if (trans.security.decode_id(id) in entries):
                        trans.get_user().stored_workflow_menu_entries.remove(
                            entries[trans.security.decode_id(id)])
            # set tags
            trans.app.tag_handler.set_tags_from_list(
                user=trans.user,
                item=stored_workflow,
                new_tags_list=workflow_dict.get('tags', []))

            if 'steps' in workflow_dict:
                try:
                    from_dict_kwds = self.__import_or_update_kwds(payload)
                    workflow, errors = self.workflow_contents_manager.update_workflow_from_raw_description(
                        trans, stored_workflow, raw_workflow_description,
                        **from_dict_kwds)
                except workflows.MissingToolsException:
                    raise exceptions.MessageException(
                        "This workflow contains missing tools. It cannot be saved until they have been removed from the workflow or installed."
                    )
            else:
                # We only adjusted tags and menu entry
                return payload
        else:
            message = "Updating workflow requires dictionary containing 'workflow' attribute with new JSON description."
            raise exceptions.RequestParameterInvalidException(message)
        return self.workflow_contents_manager.workflow_to_dict(
            trans, stored_workflow, style="instance")
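
A hedged client-side sketch of driving the update endpoint above; the instance URL, API key, and workflow id are assumptions, and the API key is passed as a query parameter for brevity.

import requests

GALAXY_URL = "https://galaxy.example.org"   # assumed instance URL
API_KEY = "0123456789abcdef"                # assumed API key
WORKFLOW_ID = "f2db41e1fa331b3e"            # assumed encoded workflow id

# Fetch the current JSON description, then PUT it back with a new name and annotation.
wf = requests.get(
    f"{GALAXY_URL}/api/workflows/{WORKFLOW_ID}/download",
    params={"key": API_KEY},
).json()
wf["name"] = "Renamed workflow"
wf["annotation"] = "Updated via the API"
resp = requests.put(
    f"{GALAXY_URL}/api/workflows/{WORKFLOW_ID}",
    params={"key": API_KEY},
    json={"workflow": wf},
)
resp.raise_for_status()
print(resp.json()["name"])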
Example #23
    def execute( self, trans, progress, invocation, step ):
        tool = trans.app.toolbox.get_tool( step.tool_id, tool_version=step.tool_version )
        tool_state = step.state
        # Not strictly needed - but keep Tool state clean by stripping runtime
        # metadata parameters from it.
        if RUNTIME_STEP_META_STATE_KEY in tool_state.inputs:
            del tool_state.inputs[ RUNTIME_STEP_META_STATE_KEY ]
        collections_to_match = self._find_collections_to_match( tool, progress, step )
        # Have implicit collections...
        if collections_to_match.has_collections():
            collection_info = self.trans.app.dataset_collections_service.match_collections( collections_to_match )
        else:
            collection_info = None

        param_combinations = []
        if collection_info:
            iteration_elements_iter = collection_info.slice_collections()
        else:
            iteration_elements_iter = [ None ]

        for iteration_elements in iteration_elements_iter:
            execution_state = tool_state.copy()
            # TODO: Move next step into copy()
            execution_state.inputs = make_dict_copy( execution_state.inputs )

            # Connect up
            def callback( input, prefixed_name, **kwargs ):
                replacement = NO_REPLACEMENT
                if isinstance( input, DataToolParameter ) or isinstance( input, DataCollectionToolParameter ):
                    if iteration_elements and prefixed_name in iteration_elements:
                        if isinstance( input, DataToolParameter ):
                            # Pull out dataset instance from element.
                            replacement = iteration_elements[ prefixed_name ].dataset_instance
                            if hasattr(iteration_elements[ prefixed_name ], u'element_identifier') and iteration_elements[ prefixed_name ].element_identifier:
                                replacement.element_identifier = iteration_elements[ prefixed_name ].element_identifier
                        else:
                            # If collection - just use element model object.
                            replacement = iteration_elements[ prefixed_name ]
                    else:
                        replacement = progress.replacement_for_tool_input( step, input, prefixed_name )
                else:
                    replacement = progress.replacement_for_tool_input( step, input, prefixed_name )
                return replacement

            try:
                # Replace DummyDatasets with historydatasetassociations
                visit_input_values( tool.inputs, execution_state.inputs, callback, no_replacement_value=NO_REPLACEMENT )
            except KeyError as k:
                message_template = "Error due to input mapping of '%s' in '%s'.  A common cause of this is conditional outputs that cannot be determined until runtime, please review your workflow."
                message = message_template % (tool.name, str(k))
                raise exceptions.MessageException( message )
            param_combinations.append( execution_state.inputs )

        try:
            execution_tracker = execute(
                trans=self.trans,
                tool=tool,
                param_combinations=param_combinations,
                history=invocation.history,
                collection_info=collection_info,
                workflow_invocation_uuid=invocation.uuid.hex
            )
        except ToolInputsNotReadyException:
            delayed_why = "tool [%s] inputs are not ready, this special tool requires inputs to be ready" % tool.id
            raise DelayedWorkflowEvaluation(why=delayed_why)

        if collection_info:
            step_outputs = dict( execution_tracker.implicit_collections )
        else:
            step_outputs = dict( execution_tracker.output_datasets )
            step_outputs.update( execution_tracker.output_collections )
        progress.set_step_outputs( step, step_outputs )
        jobs = execution_tracker.successful_jobs
        for job in jobs:
            self._handle_post_job_actions( step, job, invocation.replacement_dict )
        if execution_tracker.execution_errors:
            failed_count = len(execution_tracker.execution_errors)
            success_count = len(execution_tracker.successful_jobs)
            all_count = failed_count + success_count
            message = "Failed to create %d out of %s job(s) for workflow step." % (failed_count, all_count)
            raise Exception(message)
        return jobs
Example #24
    def _workflow_to_dict_editor(self, trans, stored):
        workflow = stored.latest_workflow
        # Pack workflow data into a dictionary and return
        data = {}
        data['name'] = workflow.name
        data['steps'] = {}
        data['upgrade_messages'] = {}
        # For each step, rebuild the form and encode the state
        for step in workflow.steps:
            # Load from database representation
            module = module_factory.from_workflow_step(trans,
                                                       step,
                                                       exact_tools=False)
            if not module:
                raise exceptions.MessageException(
                    'Unrecognized step type: %s' % step.type)
            # Load label from state of data input modules, necessary for backward compatibility
            self.__set_default_label(step, module, step.tool_inputs)
            # Fix any missing parameters
            upgrade_message = module.check_and_update_state()
            if upgrade_message:
                data['upgrade_messages'][step.order_index] = upgrade_message
            if (hasattr(module,
                        "version_changes")) and (module.version_changes):
                if step.order_index in data['upgrade_messages']:
                    data['upgrade_messages'][step.order_index][
                        module.tool.name] = "\n".join(module.version_changes)
                else:
                    data['upgrade_messages'][step.order_index] = {
                        module.tool.name: "\n".join(module.version_changes)
                    }
            # Get user annotation.
            annotation_str = self.get_item_annotation_str(
                trans.sa_session, trans.user, step) or ''
            config_form = module.get_config_form()
            # Pack attributes into plain dictionary
            step_dict = {
                'id': step.order_index,
                'type': module.type,
                'label': module.label,
                'content_id': module.get_content_id(),
                'name': module.get_name(),
                'tool_state': module.get_state(),
                'tooltip': module.get_tooltip(static_path=url_for('/static')),
                'errors': module.get_errors(),
                'data_inputs': module.get_data_inputs(),
                'data_outputs': module.get_data_outputs(),
                'config_form': config_form,
                'annotation': annotation_str,
                'post_job_actions': {},
                'uuid': str(step.uuid) if step.uuid else None,
                'workflow_outputs': []
            }
            # Connections
            input_connections = step.input_connections
            input_connections_type = {}
            multiple_input = {
            }  # Boolean value indicating if this can be multiple
            if (step.type is None or step.type == 'tool') and module.tool:
                # Determine full (prefixed) names of valid input datasets
                data_input_names = {}

                def callback(input, prefixed_name, **kwargs):
                    if isinstance(input, DataToolParameter) or isinstance(
                            input, DataCollectionToolParameter):
                        data_input_names[prefixed_name] = True
                        multiple_input[prefixed_name] = input.multiple
                        if isinstance(input, DataToolParameter):
                            input_connections_type[input.name] = "dataset"
                        if isinstance(input, DataCollectionToolParameter):
                            input_connections_type[
                                input.name] = "dataset_collection"

                visit_input_values(module.tool.inputs, module.state.inputs,
                                   callback)
                # Filter
                # FIXME: this removes connections without displaying a message currently!
                input_connections = [
                    conn for conn in input_connections
                    if conn.input_name in data_input_names
                ]
                # post_job_actions
                pja_dict = {}
                for pja in step.post_job_actions:
                    pja_dict[pja.action_type + pja.output_name] = dict(
                        action_type=pja.action_type,
                        output_name=pja.output_name,
                        action_arguments=pja.action_arguments)
                step_dict['post_job_actions'] = pja_dict

            # workflow outputs
            outputs = []
            for output in step.unique_workflow_outputs:
                output_label = output.label
                output_name = output.output_name
                output_uuid = str(output.uuid) if output.uuid else None
                outputs.append({
                    "output_name": output_name,
                    "uuid": output_uuid,
                    "label": output_label
                })
            step_dict['workflow_outputs'] = outputs

            # Encode input connections as dictionary
            input_conn_dict = {}
            for conn in input_connections:
                input_type = "dataset"
                if conn.input_name in input_connections_type:
                    input_type = input_connections_type[conn.input_name]
                conn_dict = dict(id=conn.output_step.order_index,
                                 output_name=conn.output_name,
                                 input_type=input_type)
                if conn.input_name in multiple_input:
                    if conn.input_name in input_conn_dict:
                        input_conn_dict[conn.input_name].append(conn_dict)
                    else:
                        input_conn_dict[conn.input_name] = [conn_dict]
                else:
                    input_conn_dict[conn.input_name] = conn_dict
            step_dict['input_connections'] = input_conn_dict

            # Position
            step_dict['position'] = step.position
            # Add to return value
            data['steps'][step.order_index] = step_dict
        return data
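
A hedged sketch of a single entry in data['steps'] as assembled above for a tool step; all values are illustrative placeholders.

editor_step = {
    'id': 3,                             # step.order_index
    'type': 'tool',
    'label': 'Trim reads',
    'content_id': 'example_trimmer',     # placeholder tool id
    'name': 'Example trimmer',
    'tool_state': '{}',                  # state as returned by module.get_state()
    'tooltip': None,
    'errors': None,
    'data_inputs': [],
    'data_outputs': [],
    'config_form': {},
    'annotation': '',
    'post_job_actions': {},
    'uuid': '9a1452c2-0b73-4cb1-a8f8-1b9c5f0f2f4b',
    'workflow_outputs': [{'output_name': 'out_file1', 'uuid': None, 'label': None}],
    'input_connections': {'input1': {'id': 0, 'output_name': 'output', 'input_type': 'dataset'}},
    'position': {'left': 420, 'top': 180},
}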
Example #25
File: sharable.py Project: msauria/galaxy
    def sharing(self, trans, id: EncodedDatabaseIdField, payload: Optional[SharingPayload] = None) -> SharingStatus:
        """Allows to publish or share with other users the given resource (by id) and returns the current sharing
        status of the resource.

        :param id: The encoded ID of the resource to share.
        :type id: EncodedDatabaseIdField
        :param payload: The options to share this resource, defaults to None
        :type payload: Optional[sharable.SharingPayload], optional
        :return: The current sharing status of the resource.
        :rtype: sharable.SharingStatus
        """
        skipped = False
        class_name = self.manager.model_class.__name__
        item = base.get_object(trans, id, class_name, check_ownership=True, check_accessible=True, deleted=False)
        actions = []
        if payload:
            actions += payload.action.split("-")
        for action in actions:
            if action == "make_accessible_via_link":
                self._make_item_accessible(trans.sa_session, item)
                if hasattr(item, "has_possible_members") and item.has_possible_members:
                    skipped = self._make_members_public(trans, item)
            elif action == "make_accessible_and_publish":
                self._make_item_accessible(trans.sa_session, item)
                if hasattr(item, "has_possible_members") and item.has_possible_members:
                    skipped = self._make_members_public(trans, item)
                item.published = True
            elif action == "publish":
                if item.importable:
                    item.published = True
                    if hasattr(item, "has_possible_members") and item.has_possible_members:
                        skipped = self._make_members_public(trans, item)
                else:
                    raise exceptions.MessageException(f"{class_name} not importable.")
            elif action == "disable_link_access":
                item.importable = False
            elif action == "unpublish":
                item.published = False
            elif action == "disable_link_access_and_unpublish":
                item.importable = item.published = False
            elif action == "unshare_user":
                if payload is None or payload.user_id is None:
                    raise exceptions.MessageException(f"Missing required user_id to perform {action}")
                user = trans.sa_session.query(trans.app.model.User).get(trans.app.security.decode_id(payload.user_id))
                class_name_lc = class_name.lower()
                ShareAssociation = getattr(trans.app.model, f"{class_name}UserShareAssociation")
                usas = trans.sa_session.query(ShareAssociation).filter_by(**{"user": user, class_name_lc: item}).all()
                if not usas:
                    raise exceptions.MessageException(f"{class_name} was not shared with user.")
                for usa in usas:
                    trans.sa_session.delete(usa)
            trans.sa_session.add(item)
            trans.sa_session.flush()
        if item.importable and not item.slug:
            self._make_item_accessible(trans.sa_session, item)
        item_dict = self.serializer.serialize_to_view(item,
            user=trans.user, trans=trans, default_view="sharing")
        item_dict["users_shared_with"] = [{"id": self.manager.app.security.encode_id(a.user.id), "email": a.user.email} for a in item.users_shared_with]
        if skipped:
            item_dict["skipped"] = True
        return SharingStatus.parse_obj(item_dict)
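
A hedged sketch of payloads a client might send to the sharing endpoint above; chained actions use the '-' separator that payload.action.split("-") expects, and the user id is a placeholder.

# Make the item link-accessible and publish it in one call.
sharing_payload = {"action": "make_accessible_via_link-publish"}

# Revoke a share from a specific user; user_id is required for this action.
unshare_payload = {
    "action": "unshare_user",
    "user_id": "adb5f5c93f827949",   # placeholder encoded user id
}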
Example #26
    def create(self, trans, payload, **kwd):
        """
        create( trans, payload )
        * POST /api/histories:
            create a new history

        :type   payload: dict
        :param  payload: (optional) dictionary structure containing:
            * name:             the new history's name
            * history_id:       the id of the history to copy
            * all_datasets:     copy deleted hdas/hdcas? 'True' or 'False', defaults to True
            * archive_source:   the url that will generate the archive to import
            * archive_type:     'url' (default)

        :param  keys: same as the use of `keys` in the `index` function above
        :param  view: same as the use of `view` in the `index` function above

        :rtype:     dict
        :returns:   element view of new history
        """
        if trans.user.bootstrap_admin_user:
            raise exceptions.RealUserRequiredException(
                "Only real users can create histories.")
        hist_name = None
        if payload.get('name', None):
            hist_name = restore_text(payload['name'])
        copy_this_history_id = payload.get('history_id', None)

        all_datasets = util.string_as_bool(payload.get('all_datasets', True))

        if "archive_source" in payload:
            archive_source = payload["archive_source"]
            archive_file = payload.get("archive_file")
            if archive_source:
                archive_type = payload.get("archive_type", "url")
            elif hasattr(archive_file, "file"):
                archive_source = payload["archive_file"].file.name
                archive_type = "file"
            else:
                raise exceptions.MessageException(
                    "Please provide a url or file.")
            job = self.queue_history_import(trans,
                                            archive_type=archive_type,
                                            archive_source=archive_source)
            job_dict = job.to_dict()
            job_dict[
                "message"] = "Importing history from source '%s'. This history will be visible when the import is complete." % archive_source
            return trans.security.encode_all_ids(job_dict)

        new_history = None
        # if a history id was passed, copy that history
        if copy_this_history_id:
            decoded_id = self.decode_id(copy_this_history_id)
            original_history = self.manager.get_accessible(
                decoded_id, trans.user, current_history=trans.history)
            hist_name = hist_name or ("Copy of '%s'" % original_history.name)
            new_history = original_history.copy(name=hist_name,
                                                target_user=trans.user,
                                                all_datasets=all_datasets)

        # otherwise, create a new empty history
        else:
            new_history = self.manager.create(user=trans.user, name=hist_name)

        trans.app.security_agent.history_set_default_permissions(new_history)
        trans.sa_session.add(new_history)
        trans.sa_session.flush()

        # an anonymous user can only have one history
        if self.user_manager.is_anonymous(trans.user):
            self.manager.set_current(trans, new_history)

        return self.serializer.serialize_to_view(
            new_history,
            user=trans.user,
            trans=trans,
            **self._parse_serialization_params(kwd, 'detailed'))
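
A hedged sketch of the two payload shapes this create endpoint handles (copying an existing history, or importing one from an archive URL); the instance URL, API key, ids, and archive location are placeholders.

import requests

GALAXY_URL = "https://galaxy.example.org"   # assumed instance URL
API_KEY = "0123456789abcdef"                # assumed API key

# Copy an existing history, including deleted datasets.
copy_payload = {
    "name": "Copy of my analysis",
    "history_id": "1cd8e2f6b131e891",       # placeholder encoded history id
    "all_datasets": True,
}

# Or import a history from an archive generated elsewhere.
import_payload = {
    "archive_source": "https://example.org/exports/history.tar.gz",
    "archive_type": "url",
}

resp = requests.post(f"{GALAXY_URL}/api/histories", params={"key": API_KEY}, json=copy_payload)
resp.raise_for_status()
print(resp.json())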
Example #27
 def _workflow_to_dict_run(self, trans, stored):
     """
     Builds workflow dictionary used by run workflow form
     """
     workflow = stored.latest_workflow
     if len(workflow.steps) == 0:
         raise exceptions.MessageException(
             'Workflow cannot be run because it does not have any steps.')
     if attach_ordered_steps(workflow, workflow.steps):
         raise exceptions.MessageException(
             'Workflow cannot be run because it contains cycles.')
     trans.workflow_building_mode = workflow_building_modes.USE_HISTORY
     module_injector = WorkflowModuleInjector(trans)
     has_upgrade_messages = False
     step_version_changes = []
     missing_tools = []
     errors = {}
     for step in workflow.steps:
         try:
             module_injector.inject(step,
                                    steps=workflow.steps,
                                    exact_tools=False)
         except exceptions.ToolMissingException:
             if step.tool_id not in missing_tools:
                 missing_tools.append(step.tool_id)
             continue
         if step.upgrade_messages:
             has_upgrade_messages = True
         if step.type == 'tool' or step.type is None:
             if step.module.version_changes:
                 step_version_changes.extend(step.module.version_changes)
             step_errors = step.module.get_errors()
             if step_errors:
                 errors[step.id] = step_errors
     if missing_tools:
         workflow.annotation = self.get_item_annotation_str(
             trans.sa_session, trans.user, workflow)
         raise exceptions.MessageException('Following tools missing: %s' %
                                           ', '.join(missing_tools))
     workflow.annotation = self.get_item_annotation_str(
         trans.sa_session, trans.user, workflow)
     step_order_indices = {}
     for step in workflow.steps:
         step_order_indices[step.id] = step.order_index
     step_models = []
     for i, step in enumerate(workflow.steps):
         step_model = None
         if step.type == 'tool':
             incoming = {}
             tool = trans.app.toolbox.get_tool(
                 step.tool_id, tool_version=step.tool_version)
             params_to_incoming(incoming, tool.inputs, step.state.inputs,
                                trans.app)
             step_model = tool.to_json(
                 trans,
                 incoming,
                 workflow_building_mode=workflow_building_modes.USE_HISTORY)
             step_model['post_job_actions'] = [{
                 'short_str':
                 ActionBox.get_short_str(pja),
                 'action_type':
                 pja.action_type,
                 'output_name':
                 pja.output_name,
                 'action_arguments':
                 pja.action_arguments
             } for pja in step.post_job_actions]
         else:
             inputs = step.module.get_runtime_inputs(
                 connections=step.output_connections)
             step_model = {
                 'inputs':
                 [input.to_dict(trans) for input in inputs.values()]
             }
         step_model['step_type'] = step.type
         step_model['step_label'] = step.label
         step_model['step_name'] = step.module.get_name()
         step_model['step_version'] = step.module.get_version()
         step_model['step_index'] = step.order_index
         step_model['output_connections'] = [{
             'input_step_index':
             step_order_indices.get(oc.input_step_id),
             'output_step_index':
             step_order_indices.get(oc.output_step_id),
             'input_name':
             oc.input_name,
             'output_name':
             oc.output_name
         } for oc in step.output_connections]
         if step.annotations:
             step_model['annotation'] = step.annotations[0].annotation
         if step.upgrade_messages:
             step_model['messages'] = step.upgrade_messages
         step_models.append(step_model)
     return {
         'id':
         trans.app.security.encode_id(stored.id),
         'history_id':
         trans.app.security.encode_id(trans.history.id)
         if trans.history else None,
         'name':
         stored.name,
         'steps':
         step_models,
         'step_version_changes':
         step_version_changes,
         'has_upgrade_messages':
         has_upgrade_messages,
         'workflow_resource_parameters':
         self._workflow_resource_parameters(trans, stored, workflow),
     }
Example #28
 def _get_registry(self, trans):
     if not trans.app.visualizations_registry:
         raise exceptions.MessageException("The visualization registry has not been configured.")
     return trans.app.visualizations_registry
Example #29
    def _workflow_to_dict_export(self, trans, stored=None, workflow=None):
        """ Export the workflow contents to a dictionary ready for JSON-ification and export.
        """
        if workflow is None:
            assert stored is not None
            workflow = stored.latest_workflow

        annotation_str = ""
        tag_str = ""
        if stored is not None:
            annotation_str = self.get_item_annotation_str(
                trans.sa_session, trans.user, stored) or ''
            tag_str = stored.make_tag_string_list()
        # Pack workflow data into a dictionary and return
        data = {}
        data[
            'a_galaxy_workflow'] = 'true'  # Placeholder for identifying galaxy workflow
        data['format-version'] = "0.1"
        data['name'] = workflow.name
        data['annotation'] = annotation_str
        data['tags'] = tag_str
        if workflow.uuid is not None:
            data['uuid'] = str(workflow.uuid)
        data['steps'] = {}
        # For each step, rebuild the form and encode the state
        for step in workflow.steps:
            # Load from database representation
            module = module_factory.from_workflow_step(trans, step)
            if not module:
                raise exceptions.MessageException(
                    'Unrecognized step type: %s' % step.type)
            # Get user annotation.
            annotation_str = self.get_item_annotation_str(
                trans.sa_session, trans.user, step) or ''
            content_id = module.get_content_id()
            # Export differences for backward compatibility
            if module.type == 'tool':
                tool_state = module.get_state(nested=False)
            else:
                tool_state = module.state.inputs
            # Step info
            step_dict = {
                'id': step.order_index,
                'type': module.type,
                'content_id': content_id,
                'tool_id':
                content_id,  # For workflows exported to older Galaxies,
                # eliminate after a few years...
                'tool_version': step.tool_version,
                'name': module.get_name(),
                'tool_state': json.dumps(tool_state),
                'errors': module.get_errors(),
                'uuid': str(step.uuid),
                'label': step.label or None,
                'annotation': annotation_str
            }
            # Add tool shed repository information and post-job actions to step dict.
            if module.type == 'tool':
                if module.tool and module.tool.tool_shed:
                    step_dict["tool_shed_repository"] = {
                        'name': module.tool.repository_name,
                        'owner': module.tool.repository_owner,
                        'changeset_revision': module.tool.changeset_revision,
                        'tool_shed': module.tool.tool_shed
                    }
                pja_dict = {}
                for pja in step.post_job_actions:
                    pja_dict[pja.action_type + pja.output_name] = dict(
                        action_type=pja.action_type,
                        output_name=pja.output_name,
                        action_arguments=pja.action_arguments)
                step_dict['post_job_actions'] = pja_dict

            if module.type == 'subworkflow':
                del step_dict['content_id']
                del step_dict['errors']
                del step_dict['tool_version']
                del step_dict['tool_state']
                subworkflow = step.subworkflow
                subworkflow_as_dict = self._workflow_to_dict_export(
                    trans, stored=None, workflow=subworkflow)
                step_dict['subworkflow'] = subworkflow_as_dict

            # Data inputs, legacy section not used anywhere within core
            input_dicts = []
            step_state = module.state.inputs or {}
            if "name" in step_state and module.type != 'tool':
                name = step_state.get("name")
                input_dicts.append({
                    "name": name,
                    "description": annotation_str
                })
            for name, val in step_state.items():
                input_type = type(val)
                if input_type == RuntimeValue:
                    input_dicts.append({
                        "name":
                        name,
                        "description":
                        "runtime parameter for tool %s" % module.get_name()
                    })
                elif input_type == dict:
                    # Input type is described by a dict, e.g. indexed parameters.
                    for partval in val.values():
                        if type(partval) == RuntimeValue:
                            input_dicts.append({
                                "name":
                                name,
                                "description":
                                "runtime parameter for tool %s" %
                                module.get_name()
                            })
            step_dict['inputs'] = input_dicts

            # User outputs
            workflow_outputs_dicts = []
            for workflow_output in step.unique_workflow_outputs:
                workflow_output_dict = dict(
                    output_name=workflow_output.output_name,
                    label=workflow_output.label,
                    uuid=str(workflow_output.uuid)
                    if workflow_output.uuid is not None else None,
                )
                workflow_outputs_dicts.append(workflow_output_dict)
            step_dict['workflow_outputs'] = workflow_outputs_dicts

            # All step outputs
            step_dict['outputs'] = []
            if type(module) is ToolModule:
                for output in module.get_data_outputs():
                    step_dict['outputs'].append({
                        'name': output['name'],
                        'type': output['extensions'][0]
                    })

            # Connections
            input_connections = step.input_connections
            if step.type is None or step.type == 'tool':
                # Determine full (prefixed) names of valid input datasets
                data_input_names = {}

                def callback(input, prefixed_name, **kwargs):
                    if isinstance(input, DataToolParameter) or isinstance(
                            input, DataCollectionToolParameter):
                        data_input_names[prefixed_name] = True

                # FIXME: this updates modules silently right now; messages from updates should be provided.
                module.check_and_update_state()
                if module.tool:
                    # If the tool is installed we attempt to verify input values
                    # and connections, otherwise the last known state will be dumped without modifications.
                    visit_input_values(module.tool.inputs, module.state.inputs,
                                       callback)
                    # FIXME: this removes connections without displaying a message currently!
                    input_connections = [
                        conn for conn in input_connections
                        if (conn.input_name in data_input_names
                            or conn.non_data_connection)
                    ]

            # Encode input connections as dictionary
            input_conn_dict = {}
            unique_input_names = set(
                [conn.input_name for conn in input_connections])
            for input_name in unique_input_names:
                input_conn_dicts = []
                for conn in input_connections:
                    if conn.input_name != input_name:
                        continue
                    input_conn = dict(id=conn.output_step.order_index,
                                      output_name=conn.output_name)
                    if conn.input_subworkflow_step is not None:
                        subworkflow_step_id = conn.input_subworkflow_step.order_index
                        input_conn[
                            "input_subworkflow_step_id"] = subworkflow_step_id

                    input_conn_dicts.append(input_conn)
                input_conn_dict[input_name] = input_conn_dicts

            # Preserve backward compatibility. Previously Galaxy
            # assumed input connections would be dictionaries not
            # lists of dictionaries, so replace any singleton list
            # with just the dictionary so that workflows exported from
            # newer Galaxy instances can be used with older Galaxy
            # instances if they do not include multiple input
            # tools. This should be removed at some point. Mirrored
            # hack in _workflow_from_dict should never be removed so
            # existing workflow exports continue to function.
            for input_name, input_conn in dict(input_conn_dict).items():
                if len(input_conn) == 1:
                    input_conn_dict[input_name] = input_conn[0]
            step_dict['input_connections'] = input_conn_dict
            # Position
            step_dict['position'] = step.position
            # Add to return value
            data['steps'][step.order_index] = step_dict
        return data
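
For orientation, a pared-down sketch of the export dictionary assembled above (the format serialized into '.ga' files); the single step shown is illustrative only.

exported = {
    'a_galaxy_workflow': 'true',
    'format-version': '0.1',
    'name': 'Example workflow',
    'annotation': '',
    'tags': [],
    'uuid': '9a1452c2-0b73-4cb1-a8f8-1b9c5f0f2f4b',
    'steps': {
        0: {
            'id': 0,
            'type': 'data_input',
            'content_id': None,
            'tool_id': None,
            'tool_version': None,
            'name': 'Input dataset',
            'tool_state': '{"name": "Input dataset"}',
            'errors': None,
            'uuid': 'b0c7c3a1-6d3f-4f4e-9a67-111111111111',
            'label': None,
            'annotation': '',
            'inputs': [{'name': 'Input dataset', 'description': ''}],
            'workflow_outputs': [],
            'outputs': [],
            'input_connections': {},
            'position': {'left': 10, 'top': 10},
        },
    },
}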
Example #30
File: tools.py Project: willemdiehl/galaxy
    def _create(self, trans, payload, **kwd):
        action = payload.get('action', None)
        if action == 'rerun':
            raise Exception("'rerun' action has been deprecated")

        # Get tool.
        tool_version = payload.get('tool_version', None)
        tool_id = payload.get('tool_id', None)
        tool_uuid = payload.get('tool_uuid', None)
        get_kwds = dict(
            tool_id=tool_id,
            tool_uuid=tool_uuid,
            tool_version=tool_version,
        )
        if tool_id is None and tool_uuid is None:
            raise exceptions.RequestParameterMissingException("Must specify either a tool_id or a tool_uuid.")

        tool = trans.app.toolbox.get_tool(**get_kwds)
        if not tool or not tool.allow_user_access(trans.user):
            raise exceptions.MessageException('Tool not found or not accessible.')
        if trans.app.config.user_activation_on:
            if not trans.user:
                log.warning("Anonymous user attempts to execute tool, but account activation is turned on.")
            elif not trans.user.active:
                log.warning("User \"%s\" attempts to execute tool, but account activation is turned on and user account is not active." % trans.user.email)

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get('history_id', None)
        if history_id:
            decoded_id = self.decode_id(history_id)
            target_history = self.history_manager.get_owned(decoded_id, trans.user, current_history=trans.history)
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get('inputs', {})

        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.items():
            if k.startswith('files_') or k.startswith('__files_'):
                inputs[k] = v

        # for inputs that are coming from the Library, copy them into the history
        self._patch_library_inputs(trans, inputs, target_history)

        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params(inputs, sanitize=False)
        incoming = params.__dict__

        # use_cached_job can be passed in via the top-level payload or among the tool inputs.
        # I think it should be a top-level parameter, but because the selector is implemented
        # as a regular tool parameter we accept both.
        use_cached_job = payload.get('use_cached_job', False) or util.string_as_bool(inputs.get('use_cached_job', 'false'))
        vars = tool.handle_input(trans, incoming, history=target_history, use_cached_job=use_cached_job)

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get('out_data', [])
        rval = {'outputs': [], 'output_collections': [], 'jobs': [], 'implicit_collections': []}
        rval['produces_entry_points'] = tool.produces_entry_points
        job_errors = vars.get('job_errors', [])
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval['errors'] = job_errors

        outputs = rval['outputs']
        # TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            # add the output name back into the output data structure
            # so it's possible to figure out which newly created elements
            # correspond with which tool file outputs
            output_dict['output_name'] = output_name
            outputs.append(trans.security.encode_dict_ids(output_dict, skip_startswith="metadata_"))

        new_pja_flush = False
        for job in vars.get('jobs', []):
            rval['jobs'].append(self.encode_all_ids(trans, job.to_dict(view='collection'), recursive=True))
            if inputs.get('send_email_notification', False):
                # Unless an anonymous user is invoking this via the API it
                # should never be an option, but check and enforce that here
                if trans.user is None:
                    raise exceptions.ToolExecutionError("Anonymously run jobs cannot send an email notification.")
                else:
                    job_email_action = trans.model.PostJobAction('EmailAction')
                    job.add_post_job_action(job_email_action)
                    new_pja_flush = True

        if new_pja_flush:
            trans.sa_session.flush()

        for output_name, collection_instance in vars.get('output_collections', []):
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['output_collections'].append(output_dict)

        for output_name, collection_instance in vars.get('implicit_collections', {}).items():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['implicit_collections'].append(output_dict)

        return rval
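
Finally, a hedged sketch of a tool-execution payload this handler (presumably wired to POST /api/tools) would accept; the tool id, dataset id, and history id are placeholders.

import requests

GALAXY_URL = "https://galaxy.example.org"   # assumed instance URL
API_KEY = "0123456789abcdef"                # assumed API key

payload = {
    "tool_id": "cat1",                                        # placeholder tool id
    "history_id": "f2db41e1fa331b3e",                         # placeholder encoded history id
    "inputs": {
        "input1": {"src": "hda", "id": "2891970512fa2d5a"},   # placeholder input dataset
    },
    "use_cached_job": False,
}
resp = requests.post(f"{GALAXY_URL}/api/tools", params={"key": API_KEY}, json=payload)
resp.raise_for_status()
for output in resp.json()["outputs"]:
    print(output.get("output_name"))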