def create(self, trans, payload, **kwd):
    """
    POST /api/workflows

    Run or create workflows from the api.

    If installed_repository_file or from_history_id is specified a new
    workflow will be created for this user. Otherwise, workflow_id must be
    specified and this API method will cause a workflow to execute.

    :param  installed_repository_file:  The path of a workflow to import.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   installed_repository_file:  str
    :param  workflow_id:  An existing workflow id. Either workflow_id,
        installed_repository_file or from_history_id must be specified.
    :type   workflow_id:  str
    :param  parameters:  If workflow_id is set - see _update_step_parameters().
    :type   parameters:  dict
    :param  ds_map:  If workflow_id is set - a dictionary mapping each input
        step id to a dictionary with 2 keys: 'src' (which can be 'ldda',
        'ld' or 'hda') and 'id' (the id of a LibraryDatasetDatasetAssociation,
        LibraryDataset or HistoryDatasetAssociation respectively).
    :type   ds_map:  dict
    :param  no_add_to_history:  If workflow_id is set - if present in the
        payload with any value, the input datasets will not be added to the
        selected history.
    :type   no_add_to_history:  str
    :param  history:  If workflow_id is set - optional history where to run
        the workflow, either the name of a new history or "hist_id=HIST_ID"
        where HIST_ID is the id of an existing history. If not specified,
        the workflow will be run in a new unnamed history.
    :type   history:  str
    :param  replacement_params:  If workflow_id is set - an optional
        dictionary used when renaming datasets.
    :type   replacement_params:  dict
    :param  from_history_id:  Id of history to extract a workflow from.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   from_history_id:  str
    :param  job_ids:  If from_history_id is set - optional list of jobs to
        include when extracting a workflow from history.
    :type   job_ids:  str
    :param  dataset_ids:  If from_history_id is set - optional list of HDA
        `hid`s corresponding to workflow inputs when extracting a workflow
        from history.
    :type   dataset_ids:  str
    :param  dataset_collection_ids:  If from_history_id is set - optional
        list of HDCA `hid`s corresponding to workflow inputs when extracting
        a workflow from history.
    :type   dataset_collection_ids:  str
    :param  workflow_name:  If from_history_id is set - name of the workflow
        to create when extracting a workflow from history.
    :type   workflow_name:  str
    :param  allow_tool_state_corrections:  If set to True, any Tool parameter
        changes will not prevent running workflow, defaults to False.
    :type   allow_tool_state_corrections:  bool
    """
    # The mutually exclusive ways a workflow can be created or run.
    ways_to_create = set([
        'workflow_id',
        'installed_repository_file',
        'from_history_id',
        'shared_workflow_id',
        'workflow',
    ])
    provided = ways_to_create.intersection(payload)
    if len(provided) == 0:
        # BUGFIX: list the *possible* parameters here. The original joined
        # the (empty) intersection, yielding a message that named nothing.
        message = "One parameter among - %s - must be specified" % ", ".join(ways_to_create)
        raise exceptions.RequestParameterMissingException(message)
    if len(provided) > 1:
        message = "Only one parameter among - %s - must be specified" % ", ".join(provided)
        raise exceptions.RequestParameterInvalidException(message)
    if 'installed_repository_file' in payload:
        # Import a workflow packaged in a Tool Shed repository file via the
        # legacy web controller.
        workflow_controller = trans.webapp.controllers['workflow']
        result = workflow_controller.import_workflow(trans=trans, cntrller='api', **payload)
        return result
    if 'from_history_id' in payload:
        # Extract a new workflow from the jobs/datasets of an existing history.
        from_history_id = payload.get('from_history_id')
        from_history_id = self.decode_id(from_history_id)
        history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
        # List comprehension instead of map() so the result is a real list
        # on Python 3 (map() would be a one-shot iterator).
        job_ids = [self.decode_id(_) for _ in payload.get('job_ids', [])]
        dataset_ids = payload.get('dataset_ids', [])
        dataset_collection_ids = payload.get('dataset_collection_ids', [])
        workflow_name = payload['workflow_name']
        stored_workflow = extract_workflow(
            trans=trans,
            user=trans.get_user(),
            history=history,
            job_ids=job_ids,
            dataset_ids=dataset_ids,
            dataset_collection_ids=dataset_collection_ids,
            workflow_name=workflow_name,
        )
        item = stored_workflow.to_dict(value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('workflow', id=item['id'])
        return item
    if 'shared_workflow_id' in payload:
        # Import a workflow shared by another user.
        workflow_id = payload['shared_workflow_id']
        return self.__api_import_shared_workflow(trans, workflow_id, payload)
    if 'workflow' in payload:
        # Create a workflow from a posted workflow description.
        return self.__api_import_new_workflow(trans, payload, **kwd)
    # Otherwise - run an existing workflow.
    workflow_id = payload.get('workflow_id', None)
    if not workflow_id:
        message = "Invalid workflow_id specified."
        raise exceptions.RequestParameterInvalidException(message)
    # Get workflow + accessibility check.
    stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id)
    workflow = stored_workflow.latest_workflow
    run_config = build_workflow_run_config(trans, workflow, payload)
    history = run_config.target_history
    # invoke may throw MessageExceptions on tool errors, failure
    # to match up inputs, etc...
    outputs, invocation = invoke(
        trans=trans,
        workflow=workflow,
        workflow_run_config=run_config,
        populate_state=True,
    )
    trans.sa_session.flush()
    # Build legacy output - should probably include more information from
    # outputs.
    rval = {}
    rval['history'] = trans.security.encode_id(history.id)
    rval['outputs'] = []
    for step in workflow.steps:
        if step.type == 'tool' or step.type is None:
            # .values() instead of .itervalues() for Python 3 compatibility.
            for v in outputs[step.id].values():
                rval['outputs'].append(trans.security.encode_id(v.id))
    # Newer version of this API just returns the invocation as a dict, to
    # facilitate migration - produce the newer style response and blend in
    # the older information.
    invocation_response = self.__encode_invocation(trans, invocation, step_details=kwd.get('step_details', False))
    invocation_response.update(rval)
    return invocation_response
def create(self, trans, payload, **kwd):
    """
    POST /api/workflows

    Run or create workflows from the api.

    If installed_repository_file or from_history_id is specified a new
    workflow will be created for this user. Otherwise, workflow_id must be
    specified and this API method will cause a workflow to execute.

    :param  installed_repository_file:  The path of a workflow to import.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   installed_repository_file:  str
    :param  workflow_id:  An existing workflow id. Either workflow_id,
        installed_repository_file or from_history_id must be specified.
    :type   workflow_id:  str
    :param  parameters:  If workflow_id is set - see _update_step_parameters().
    :type   parameters:  dict
    :param  ds_map:  If workflow_id is set - a dictionary mapping each input
        step id to a dictionary with 2 keys: 'src' (which can be 'ldda',
        'ld' or 'hda') and 'id' (the id of a LibraryDatasetDatasetAssociation,
        LibraryDataset or HistoryDatasetAssociation respectively).
    :type   ds_map:  dict
    :param  no_add_to_history:  If workflow_id is set - if present in the
        payload with any value, the input datasets will not be added to the
        selected history.
    :type   no_add_to_history:  str
    :param  history:  If workflow_id is set - optional history where to run
        the workflow, either the name of a new history or "hist_id=HIST_ID"
        where HIST_ID is the id of an existing history. If not specified,
        the workflow will be run in a new unnamed history.
    :type   history:  str
    :param  replacement_params:  If workflow_id is set - an optional
        dictionary used when renaming datasets.
    :type   replacement_params:  dict
    :param  from_history_id:  Id of history to extract a workflow from.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   from_history_id:  str
    :param  job_ids:  If from_history_id is set - optional list of jobs to
        include when extracting a workflow from history.
    :type   job_ids:  str
    :param  dataset_ids:  If from_history_id is set - optional list of HDA
        `hid`s corresponding to workflow inputs when extracting a workflow
        from history.
    :type   dataset_ids:  str
    :param  dataset_collection_ids:  If from_history_id is set - optional
        list of HDCA `hid`s corresponding to workflow inputs when extracting
        a workflow from history.
    :type   dataset_collection_ids:  str
    :param  workflow_name:  If from_history_id is set - name of the workflow
        to create when extracting a workflow from history.
    :type   workflow_name:  str
    :param  allow_tool_state_corrections:  If set to True, any Tool parameter
        changes will not prevent running workflow, defaults to False.
    :type   allow_tool_state_corrections:  bool
    :param  use_cached_job:  If set to True galaxy will attempt to find
        previously executed steps for all workflow steps with the exact same
        parameter combinations and will copy the outputs of the previously
        executed step.
    """
    # The mutually exclusive ways a workflow can be created or run.
    ways_to_create = set([
        'archive_source',
        'workflow_id',
        'installed_repository_file',
        'from_history_id',
        'from_path',
        'shared_workflow_id',
        'workflow',
    ])
    if len(ways_to_create.intersection(payload)) == 0:
        message = "One parameter among - %s - must be specified" % ", ".join(ways_to_create)
        raise exceptions.RequestParameterMissingException(message)
    if len(ways_to_create.intersection(payload)) > 1:
        message = "Only one parameter among - %s - must be specified" % ", ".join(ways_to_create)
        raise exceptions.RequestParameterInvalidException(message)
    if 'installed_repository_file' in payload:
        # Reading arbitrary server-side paths is admin-only.
        if not trans.user_is_admin:
            raise exceptions.AdminRequiredException()
        installed_repository_file = payload.get('installed_repository_file', '')
        if not os.path.exists(installed_repository_file):
            # BUGFIX: fill in the '%s' placeholder - the original raised with
            # the literal, unformatted template string.
            raise exceptions.MessageException("Repository file '%s' not found." % installed_repository_file)
        elif os.path.getsize(os.path.abspath(installed_repository_file)) > 0:
            workflow_data = None
            with open(installed_repository_file, 'rb') as f:
                workflow_data = f.read()
            return self.__api_import_from_archive(trans, workflow_data)
        else:
            raise exceptions.MessageException("You attempted to open an empty file.")
    if 'archive_source' in payload:
        # Import from a URL, a server-side file:// path (admin-only) or an
        # uploaded archive file.
        archive_source = payload['archive_source']
        archive_file = payload.get('archive_file')
        archive_data = None
        if archive_source:
            if archive_source.startswith("file://"):
                if not trans.user_is_admin:
                    raise exceptions.AdminRequiredException()
                workflow_src = {"src": "from_path", "path": archive_source[len("file://"):]}
                payload["workflow"] = workflow_src
                return self.__api_import_new_workflow(trans, payload, **kwd)
            else:
                try:
                    response = requests.get(archive_source)
                    # BUGFIX: treat HTTP error statuses as failures instead of
                    # importing an error page body as workflow data.
                    response.raise_for_status()
                    archive_data = response.text
                except Exception:
                    raise exceptions.MessageException("Failed to open URL '%s'." % escape(archive_source))
        elif hasattr(archive_file, 'file'):
            uploaded_file = archive_file.file
            uploaded_file_name = uploaded_file.name
            if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
                archive_data = uploaded_file.read()
            else:
                raise exceptions.MessageException("You attempted to upload an empty file.")
        else:
            raise exceptions.MessageException("Please provide a URL or file.")
        return self.__api_import_from_archive(trans, archive_data, "uploaded file")
    if 'from_history_id' in payload:
        # Extract a new workflow from the jobs/datasets of an existing history.
        from_history_id = payload.get('from_history_id')
        from_history_id = self.decode_id(from_history_id)
        history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
        job_ids = [self.decode_id(_) for _ in payload.get('job_ids', [])]
        dataset_ids = payload.get('dataset_ids', [])
        dataset_collection_ids = payload.get('dataset_collection_ids', [])
        workflow_name = payload['workflow_name']
        stored_workflow = extract_workflow(
            trans=trans,
            user=trans.get_user(),
            history=history,
            job_ids=job_ids,
            dataset_ids=dataset_ids,
            dataset_collection_ids=dataset_collection_ids,
            workflow_name=workflow_name,
        )
        item = stored_workflow.to_dict(value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('workflow', id=item['id'])
        return item
    if 'from_path' in payload:
        # Import a workflow from a server-side path by rewriting the payload
        # into the generic 'workflow' form.
        from_path = payload.get('from_path')
        payload["workflow"] = {"src": "from_path", "path": from_path}
        return self.__api_import_new_workflow(trans, payload, **kwd)
    if 'shared_workflow_id' in payload:
        # Import a workflow shared by another user.
        workflow_id = payload['shared_workflow_id']
        return self.__api_import_shared_workflow(trans, workflow_id, payload)
    if 'workflow' in payload:
        return self.__api_import_new_workflow(trans, payload, **kwd)
    # Otherwise - run an existing workflow.
    workflow_id = payload.get('workflow_id', None)
    if not workflow_id:
        message = "Invalid workflow_id specified."
        raise exceptions.RequestParameterInvalidException(message)
    # Get workflow + accessibility check.
    stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id)
    workflow = stored_workflow.latest_workflow
    run_configs = build_workflow_run_configs(trans, workflow, payload)
    # This endpoint only supports a single invocation per request.
    assert len(run_configs) == 1
    run_config = run_configs[0]
    history = run_config.target_history
    # invoke may throw MessageExceptions on tool errors, failure
    # to match up inputs, etc...
    outputs, invocation = invoke(
        trans=trans,
        workflow=workflow,
        workflow_run_config=run_config,
        populate_state=True,
    )
    trans.sa_session.flush()
    # Build legacy output - should probably include more information from
    # outputs.
    rval = {}
    rval['history'] = trans.security.encode_id(history.id)
    rval['outputs'] = []
    if outputs:
        # Newer outputs don't necessarily fill outputs (?)
        for step in workflow.steps:
            if step.type == 'tool' or step.type is None:
                for v in outputs[step.id].values():
                    rval['outputs'].append(trans.security.encode_id(v.id))
    # Newer version of this API just returns the invocation as a dict, to
    # facilitate migration - produce the newer style response and blend in
    # the older information.
    invocation_response = self.__encode_invocation(invocation, **kwd)
    invocation_response.update(rval)
    return invocation_response
def create(self, trans, payload, **kwd):
    """
    POST /api/workflows

    Run or create workflows from the api.

    If installed_repository_file or from_history_id is specified a new
    workflow will be created for this user. Otherwise, workflow_id must be
    specified and this API method will cause a workflow to execute.

    :param  installed_repository_file:  The path of a workflow to import.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   installed_repository_file:  str
    :param  workflow_id:  An existing workflow id. Either workflow_id,
        installed_repository_file or from_history_id must be specified.
    :type   workflow_id:  str
    :param  parameters:  If workflow_id is set - see _update_step_parameters().
    :type   parameters:  dict
    :param  ds_map:  If workflow_id is set - a dictionary mapping each input
        step id to a dictionary with 2 keys: 'src' (which can be 'ldda',
        'ld' or 'hda') and 'id' (the id of a LibraryDatasetDatasetAssociation,
        LibraryDataset or HistoryDatasetAssociation respectively).
    :type   ds_map:  dict
    :param  no_add_to_history:  If workflow_id is set - if present in the
        payload with any value, the input datasets will not be added to the
        selected history.
    :type   no_add_to_history:  str
    :param  history:  If workflow_id is set - optional history where to run
        the workflow, either the name of a new history or "hist_id=HIST_ID"
        where HIST_ID is the id of an existing history. If not specified,
        the workflow will be run in a new unnamed history.
    :type   history:  str
    :param  replacement_params:  If workflow_id is set - an optional
        dictionary used when renaming datasets.
    :type   replacement_params:  dict
    :param  from_history_id:  Id of history to extract a workflow from.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   from_history_id:  str
    :param  job_ids:  If from_history_id is set - optional list of jobs to
        include when extracting a workflow from history.
    :type   job_ids:  str
    :param  dataset_ids:  If from_history_id is set - optional list of HDA
        `hid`s corresponding to workflow inputs when extracting a workflow
        from history.
    :type   dataset_ids:  str
    :param  dataset_collection_ids:  If from_history_id is set - optional
        list of HDCA `hid`s corresponding to workflow inputs when extracting
        a workflow from history.
    :type   dataset_collection_ids:  str
    :param  workflow_name:  If from_history_id is set - name of the workflow
        to create when extracting a workflow from history.
    :type   workflow_name:  str
    :param  allow_tool_state_corrections:  If set to True, any Tool parameter
        changes will not prevent running workflow, defaults to False.
    :type   allow_tool_state_corrections:  bool
    :param  use_cached_job:  If set to True galaxy will attempt to find
        previously executed steps for all workflow steps with the exact same
        parameter combinations and will copy the outputs of the previously
        executed step.
    """
    # The mutually exclusive ways a workflow can be created or run.
    ways_to_create = set([
        'archive_source',
        'workflow_id',
        'installed_repository_file',
        'from_history_id',
        'from_path',
        'shared_workflow_id',
        'workflow',
    ])
    if len(ways_to_create.intersection(payload)) == 0:
        message = "One parameter among - %s - must be specified" % ", ".join(ways_to_create)
        raise exceptions.RequestParameterMissingException(message)
    if len(ways_to_create.intersection(payload)) > 1:
        message = "Only one parameter among - %s - must be specified" % ", ".join(ways_to_create)
        raise exceptions.RequestParameterInvalidException(message)
    if 'installed_repository_file' in payload:
        # Reading arbitrary server-side paths is admin-only.
        if not trans.user_is_admin:
            raise exceptions.AdminRequiredException()
        installed_repository_file = payload.get('installed_repository_file', '')
        if not os.path.exists(installed_repository_file):
            # BUGFIX: fill in the '%s' placeholder - the original raised with
            # the literal, unformatted template string.
            raise exceptions.MessageException("Repository file '%s' not found." % installed_repository_file)
        elif os.path.getsize(os.path.abspath(installed_repository_file)) > 0:
            workflow_data = None
            with open(installed_repository_file, 'rb') as f:
                workflow_data = f.read()
            return self.__api_import_from_archive(trans, workflow_data)
        else:
            raise exceptions.MessageException("You attempted to open an empty file.")
    if 'archive_source' in payload:
        # Import from a URL, a server-side file:// path (admin-only) or an
        # uploaded archive file.
        archive_source = payload['archive_source']
        archive_file = payload.get('archive_file')
        archive_data = None
        if archive_source:
            if archive_source.startswith("file://"):
                if not trans.user_is_admin:
                    raise exceptions.AdminRequiredException()
                workflow_src = {"src": "from_path", "path": archive_source[len("file://"):]}
                payload["workflow"] = workflow_src
                return self.__api_import_new_workflow(trans, payload, **kwd)
            else:
                try:
                    response = requests.get(archive_source)
                    # BUGFIX: treat HTTP error statuses as failures instead of
                    # importing an error page body as workflow data.
                    response.raise_for_status()
                    archive_data = response.text
                except Exception:
                    raise exceptions.MessageException("Failed to open URL '%s'." % escape(archive_source))
        elif hasattr(archive_file, 'file'):
            uploaded_file = archive_file.file
            uploaded_file_name = uploaded_file.name
            if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
                archive_data = uploaded_file.read()
            else:
                raise exceptions.MessageException("You attempted to upload an empty file.")
        else:
            raise exceptions.MessageException("Please provide a URL or file.")
        return self.__api_import_from_archive(trans, archive_data, "uploaded file")
    if 'from_history_id' in payload:
        # Extract a new workflow from the jobs/datasets of an existing history.
        from_history_id = payload.get('from_history_id')
        from_history_id = self.decode_id(from_history_id)
        history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
        job_ids = [self.decode_id(_) for _ in payload.get('job_ids', [])]
        dataset_ids = payload.get('dataset_ids', [])
        dataset_collection_ids = payload.get('dataset_collection_ids', [])
        workflow_name = payload['workflow_name']
        stored_workflow = extract_workflow(
            trans=trans,
            user=trans.get_user(),
            history=history,
            job_ids=job_ids,
            dataset_ids=dataset_ids,
            dataset_collection_ids=dataset_collection_ids,
            workflow_name=workflow_name,
        )
        item = stored_workflow.to_dict(value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('workflow', id=item['id'])
        return item
    if 'from_path' in payload:
        # Import a workflow from a server-side path by rewriting the payload
        # into the generic 'workflow' form.
        from_path = payload.get('from_path')
        payload["workflow"] = {"src": "from_path", "path": from_path}
        return self.__api_import_new_workflow(trans, payload, **kwd)
    if 'shared_workflow_id' in payload:
        # Import a workflow shared by another user.
        workflow_id = payload['shared_workflow_id']
        return self.__api_import_shared_workflow(trans, workflow_id, payload)
    if 'workflow' in payload:
        return self.__api_import_new_workflow(trans, payload, **kwd)
    # Otherwise - run an existing workflow.
    workflow_id = payload.get('workflow_id', None)
    if not workflow_id:
        message = "Invalid workflow_id specified."
        raise exceptions.RequestParameterInvalidException(message)
    # Get workflow + accessibility check.
    stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id)
    workflow = stored_workflow.latest_workflow
    run_configs = build_workflow_run_configs(trans, workflow, payload)
    # This endpoint only supports a single invocation per request.
    assert len(run_configs) == 1
    run_config = run_configs[0]
    history = run_config.target_history
    # invoke may throw MessageExceptions on tool errors, failure
    # to match up inputs, etc...
    outputs, invocation = invoke(
        trans=trans,
        workflow=workflow,
        workflow_run_config=run_config,
        populate_state=True,
    )
    trans.sa_session.flush()
    # Build legacy output - should probably include more information from
    # outputs.
    rval = {}
    rval['history'] = trans.security.encode_id(history.id)
    rval['outputs'] = []
    if outputs:
        # Newer outputs don't necessarily fill outputs (?)
        for step in workflow.steps:
            if step.type == 'tool' or step.type is None:
                for v in outputs[step.id].values():
                    rval['outputs'].append(trans.security.encode_id(v.id))
    # Newer version of this API just returns the invocation as a dict, to
    # facilitate migration - produce the newer style response and blend in
    # the older information.
    invocation_response = self.__encode_invocation(invocation, **kwd)
    invocation_response.update(rval)
    return invocation_response
def create(self, trans, payload, **kwd):
    """
    POST /api/workflows

    Run or create workflows from the api.

    If installed_repository_file or from_history_id is specified a new
    workflow will be created for this user. Otherwise, workflow_id must be
    specified and this API method will cause a workflow to execute.

    :param  installed_repository_file:  The path of a workflow to import.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   installed_repository_file:  str
    :param  workflow_id:  An existing workflow id. Either workflow_id,
        installed_repository_file or from_history_id must be specified.
    :type   workflow_id:  str
    :param  parameters:  If workflow_id is set - see _update_step_parameters().
    :type   parameters:  dict
    :param  ds_map:  If workflow_id is set - a dictionary mapping each input
        step id to a dictionary with 2 keys: 'src' (which can be 'ldda',
        'ld' or 'hda') and 'id' (the id of a LibraryDatasetDatasetAssociation,
        LibraryDataset or HistoryDatasetAssociation respectively).
    :type   ds_map:  dict
    :param  no_add_to_history:  If workflow_id is set - if present in the
        payload with any value, the input datasets will not be added to the
        selected history.
    :type   no_add_to_history:  str
    :param  history:  If workflow_id is set - optional history where to run
        the workflow, either the name of a new history or "hist_id=HIST_ID"
        where HIST_ID is the id of an existing history. If not specified,
        the workflow will be run in a new unnamed history.
    :type   history:  str
    :param  replacement_params:  If workflow_id is set - an optional
        dictionary used when renaming datasets.
    :type   replacement_params:  dict
    :param  from_history_id:  Id of history to extract a workflow from.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   from_history_id:  str
    :param  job_ids:  If from_history_id is set - optional list of jobs to
        include when extracting a workflow from history.
    :type   job_ids:  str
    :param  dataset_ids:  If from_history_id is set - optional list of HDA
        `hid`s corresponding to workflow inputs when extracting a workflow
        from history.
    :type   dataset_ids:  str
    :param  dataset_collection_ids:  If from_history_id is set - optional
        list of HDCA `hid`s corresponding to workflow inputs when extracting
        a workflow from history.
    :type   dataset_collection_ids:  str
    :param  workflow_name:  If from_history_id is set - name of the workflow
        to create when extracting a workflow from history.
    :type   workflow_name:  str
    """
    # The mutually exclusive ways a workflow can be created or run.
    ways_to_create = set(
        ["workflow_id", "installed_repository_file", "from_history_id", "shared_workflow_id", "workflow"]
    )
    provided = ways_to_create.intersection(payload)
    if len(provided) == 0:
        # BUGFIX: list the *possible* parameters here. The original joined
        # the (empty) intersection, yielding a message that named nothing.
        message = "One parameter among - %s - must be specified" % ", ".join(ways_to_create)
        raise exceptions.RequestParameterMissingException(message)
    if len(provided) > 1:
        message = "Only one parameter among - %s - must be specified" % ", ".join(provided)
        raise exceptions.RequestParameterInvalidException(message)
    if "installed_repository_file" in payload:
        # Import a workflow packaged in a Tool Shed repository file via the
        # legacy web controller.
        workflow_controller = trans.webapp.controllers["workflow"]
        result = workflow_controller.import_workflow(trans=trans, cntrller="api", **payload)
        return result
    if "from_history_id" in payload:
        # Extract a new workflow from the jobs/datasets of an existing history.
        from_history_id = payload.get("from_history_id")
        from_history_id = self.decode_id(from_history_id)
        history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
        # List comprehension instead of map() so the result is a real list
        # on Python 3 (map() would be a one-shot iterator).
        job_ids = [self.decode_id(_) for _ in payload.get("job_ids", [])]
        dataset_ids = payload.get("dataset_ids", [])
        dataset_collection_ids = payload.get("dataset_collection_ids", [])
        workflow_name = payload["workflow_name"]
        stored_workflow = extract_workflow(
            trans=trans,
            user=trans.get_user(),
            history=history,
            job_ids=job_ids,
            dataset_ids=dataset_ids,
            dataset_collection_ids=dataset_collection_ids,
            workflow_name=workflow_name,
        )
        item = stored_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
        item["url"] = url_for("workflow", id=item["id"])
        return item
    if "shared_workflow_id" in payload:
        # Import a workflow shared by another user.
        workflow_id = payload["shared_workflow_id"]
        return self.__api_import_shared_workflow(trans, workflow_id, payload)
    if "workflow" in payload:
        # Create a workflow from a posted workflow description.
        return self.__api_import_new_workflow(trans, payload, **kwd)
    # Otherwise - run an existing workflow.
    workflow_id = payload.get("workflow_id", None)
    if not workflow_id:
        message = "Invalid workflow_id specified."
        raise exceptions.RequestParameterInvalidException(message)
    # Get workflow + accessibility check.
    stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id)
    workflow = stored_workflow.latest_workflow
    run_config = build_workflow_run_config(trans, workflow, payload)
    history = run_config.target_history
    # invoke may throw MessageExceptions on tool errors, failure
    # to match up inputs, etc...
    outputs, invocation = invoke(
        trans=trans, workflow=workflow, workflow_run_config=run_config, populate_state=True
    )
    trans.sa_session.flush()
    # Build legacy output - should probably include more information from
    # outputs.
    rval = {}
    rval["history"] = trans.security.encode_id(history.id)
    rval["outputs"] = []
    for step in workflow.steps:
        if step.type == "tool" or step.type is None:
            # .values() instead of .itervalues() for Python 3 compatibility.
            for v in outputs[step.id].values():
                rval["outputs"].append(trans.security.encode_id(v.id))
    # Newer version of this API just returns the invocation as a dict, to
    # facilitate migration - produce the newer style response and blend in
    # the older information.
    invocation_response = self.__encode_invocation(trans, invocation)
    invocation_response.update(rval)
    return invocation_response
def create(self, trans, payload, **kwd):
    """
    POST /api/workflows

    Run or create workflows from the api.

    If installed_repository_file or from_history_id is specified a new
    workflow will be created for this user. Otherwise, workflow_id must be
    specified and this API method will cause a workflow to execute.

    :param  installed_repository_file:  The path of a workflow to import.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   installed_repository_file:  str
    :param  workflow_id:  An existing workflow id. Either workflow_id,
        installed_repository_file or from_history_id must be specified.
    :type   workflow_id:  str
    :param  parameters:  If workflow_id is set - see _update_step_parameters().
    :type   parameters:  dict
    :param  ds_map:  If workflow_id is set - a dictionary mapping each input
        step id to a dictionary with 2 keys: 'src' (which can be 'ldda',
        'ld' or 'hda') and 'id' (the id of a LibraryDatasetDatasetAssociation,
        LibraryDataset or HistoryDatasetAssociation respectively).
    :type   ds_map:  dict
    :param  no_add_to_history:  If workflow_id is set - if present in the
        payload with any value, the input datasets will not be added to the
        selected history.
    :type   no_add_to_history:  str
    :param  history:  If workflow_id is set - optional history where to run
        the workflow, either the name of a new history or "hist_id=HIST_ID"
        where HIST_ID is the id of an existing history. If not specified,
        the workflow will be run in a new unnamed history.
    :type   history:  str
    :param  replacement_params:  If workflow_id is set - an optional
        dictionary used when renaming datasets.
    :type   replacement_params:  dict
    :param  from_history_id:  Id of history to extract a workflow from.
        Either workflow_id, installed_repository_file or from_history_id
        must be specified.
    :type   from_history_id:  str
    :param  job_ids:  If from_history_id is set - optional list of jobs to
        include when extracting a workflow from history.
    :type   job_ids:  str
    :param  dataset_ids:  If from_history_id is set - optional list of HDA
        `hid`s corresponding to workflow inputs when extracting a workflow
        from history.
    :type   dataset_ids:  str
    :param  dataset_collection_ids:  If from_history_id is set - optional
        list of HDCA `hid`s corresponding to workflow inputs when extracting
        a workflow from history.
    :type   dataset_collection_ids:  str
    :param  workflow_name:  If from_history_id is set - name of the workflow
        to create when extracting a workflow from history.
    :type   workflow_name:  str
    """
    # The creation/run modes are mutually exclusive; reject ambiguous payloads.
    # NOTE(review): errors in this legacy version are reported by setting the
    # response status and returning a plain string, not by raising exceptions.
    if len(
            set([
                'workflow_id', 'installed_repository_file', 'from_history_id'
            ]).intersection(payload)) > 1:
        trans.response.status = 403
        return "Only one among 'workflow_id', 'installed_repository_file', 'from_history_id' must be specified"
    if 'installed_repository_file' in payload:
        # Import a workflow packaged in a Tool Shed repository file via the
        # legacy web controller.
        workflow_controller = trans.webapp.controllers['workflow']
        result = workflow_controller.import_workflow(trans=trans,
                                                     cntrller='api',
                                                     **payload)
        return result
    if 'from_history_id' in payload:
        # Extract a new workflow from the jobs/datasets of an existing history.
        from_history_id = payload.get('from_history_id')
        history = self.get_history(trans,
                                   from_history_id,
                                   check_ownership=False,
                                   check_accessible=True)
        # NOTE(review): map() returns a one-shot iterator on Python 3 —
        # presumably this file targets Python 2; verify before porting.
        job_ids = map(trans.security.decode_id, payload.get('job_ids', []))
        dataset_ids = payload.get('dataset_ids', [])
        dataset_collection_ids = payload.get('dataset_collection_ids', [])
        workflow_name = payload['workflow_name']
        stored_workflow = extract_workflow(
            trans=trans,
            user=trans.get_user(),
            history=history,
            job_ids=job_ids,
            dataset_ids=dataset_ids,
            dataset_collection_ids=dataset_collection_ids,
            workflow_name=workflow_name,
        )
        item = stored_workflow.to_dict(
            value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('workflow', id=item['id'])
        return item
    # Otherwise - run an existing workflow.
    workflow_id = payload.get('workflow_id', None)
    if not workflow_id:
        trans.response.status = 403
        return "Either workflow_id, installed_repository_file or from_history_id must be specified"
    # Pull other parameters out of payload.
    param_map = payload.get('parameters', {})
    inputs = payload.get('inputs', None)
    inputs_by = payload.get('inputs_by', None)
    if inputs is None:
        # Default to legacy behavior - read ds_map and reference steps
        # by unencoded step id (a raw database id).
        inputs = payload.get('ds_map', {})
        inputs_by = inputs_by or 'step_id'
    else:
        inputs = inputs or {}
        # New default is to reference steps by index of workflow step
        # which is intrinsic to the workflow and independent of the state
        # of Galaxy at the time of workflow import.
        inputs_by = inputs_by or 'step_index'
    valid_inputs_by = ['step_id', 'step_index', 'name']
    if inputs_by not in valid_inputs_by:
        trans.response.status = 403
        error_message_template = "Invalid inputs_by specified '%s' must be one of %s"
        error_message = error_message_template % (inputs_by, valid_inputs_by)
        raise ValueError(error_message)
    # Presence of the key alone (any value) disables adding inputs to history.
    add_to_history = 'no_add_to_history' not in payload
    history_param = payload.get('history', '')
    # Get workflow + accessibility check.
    stored_workflow = trans.sa_session.query(
        self.app.model.StoredWorkflow).get(
            trans.security.decode_id(workflow_id))
    if stored_workflow.user != trans.user and not trans.user_is_admin():
        # Not the owner and not an admin - allow only if explicitly shared.
        if trans.sa_session.query(
                trans.app.model.StoredWorkflowUserShareAssociation
        ).filter_by(user=trans.user,
                    stored_workflow=stored_workflow).count() == 0:
            trans.response.status = 400
            return ("Workflow is not owned by or shared with current user")
    workflow = stored_workflow.latest_workflow
    # Sanity checks.
    if not workflow:
        trans.response.status = 400
        return "Workflow not found."
    if len(workflow.steps) == 0:
        trans.response.status = 400
        return "Workflow cannot be run because it does not have any steps"
    if workflow.has_cycles:
        trans.response.status = 400
        return "Workflow cannot be run because it contains cycles"
    if workflow.has_errors:
        trans.response.status = 400
        return "Workflow cannot be run because of validation errors in some steps"
    # Get target history.
    if history_param.startswith('hist_id='):
        # Passing an existing history to use; strip the 'hist_id=' prefix.
        history = trans.sa_session.query(self.app.model.History).get(
            trans.security.decode_id(history_param[8:]))
        if history.user != trans.user and not trans.user_is_admin():
            trans.response.status = 400
            return "Invalid History specified."
    else:
        # Send workflow outputs to new history named after history_param.
        history = self.app.model.History(name=history_param,
                                         user=trans.user)
        trans.sa_session.add(history)
        trans.sa_session.flush()
    # Set workflow inputs. Each entry is resolved to history content per its
    # 'src' key; library items are converted to HDAs.
    for k in inputs:
        try:
            if inputs[k]['src'] == 'ldda':
                ldda = trans.sa_session.query(
                    self.app.model.LibraryDatasetDatasetAssociation).get(
                        trans.security.decode_id(inputs[k]['id']))
                # Access control: admin, or dataset accessible to user roles.
                assert trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), ldda.dataset)
                content = ldda.to_history_dataset_association(
                    history, add_to_history=add_to_history)
            elif inputs[k]['src'] == 'ld':
                # LibraryDataset - follow to its current LDDA first.
                ldda = trans.sa_session.query(
                    self.app.model.LibraryDataset).get(
                        trans.security.decode_id(
                            inputs[k]
                            ['id'])).library_dataset_dataset_association
                assert trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), ldda.dataset)
                content = ldda.to_history_dataset_association(
                    history, add_to_history=add_to_history)
            elif inputs[k]['src'] == 'hda':
                # Get dataset handle, add to dict and history if necessary
                content = trans.sa_session.query(
                    self.app.model.HistoryDatasetAssociation).get(
                        trans.security.decode_id(inputs[k]['id']))
                assert trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(),
                    content.dataset)
            elif inputs[k]['src'] == 'hdca':
                content = self.app.dataset_collections_service.get_dataset_collection_instance(
                    trans, 'history', inputs[k]['id'])
            else:
                trans.response.status = 400
                return "Unknown dataset source '%s' specified." % inputs[
                    k]['src']
            if add_to_history and content.history != history:
                # Copy content that lives in another history into the target.
                content = content.copy()
                if isinstance(content,
                              self.app.model.HistoryDatasetAssociation):
                    history.add_dataset(content)
                else:
                    history.add_dataset_collection(content)
            inputs[k][
                'hda'] = content  # TODO: rename key to 'content', prescreen input ensure not populated explicitly
        except AssertionError:
            # A failed access-control assert above means the user may not
            # use this dataset as an input.
            trans.response.status = 400
            return "Invalid Dataset '%s' Specified" % inputs[k]['id']
    # Run each step, connecting outputs to inputs
    replacement_dict = payload.get('replacement_params', {})
    run_config = WorkflowRunConfig(
        target_history=history,
        replacement_dict=replacement_dict,
        inputs=inputs,
        inputs_by=inputs_by,
        param_map=param_map,
    )
    outputs = invoke(
        trans=trans,
        workflow=workflow,
        workflow_run_config=run_config,
    )
    trans.sa_session.flush()
    # Build legacy output - should probably include more information from
    # outputs.
    rval = {}
    rval['history'] = trans.security.encode_id(history.id)
    rval['outputs'] = []
    for step in workflow.steps:
        if step.type == 'tool' or step.type is None:
            # NOTE(review): itervalues() is Python 2 only.
            for v in outputs[step.id].itervalues():
                rval['outputs'].append(trans.security.encode_id(v.id))
    return rval
def create(self, trans, payload, **kwd):
    """
    POST /api/workflows

    Run an existing workflow against a set of input datasets.  Workflows are
    not created through this legacy endpoint; the only "create" path kept
    here is importing a workflow when ``installed_repository_file`` is
    supplied instead of ``workflow_id``.

    :param trans:   the framework transaction (session, security, response).
    :param payload: request body.  Recognized keys:
        ``workflow_id`` - encoded id of the StoredWorkflow to run;
        ``parameters`` - optional per-step parameter overrides (dict);
        ``ds_map`` - maps each input step id to ``{'src': 'ldda'|'ld'|'hda',
        'id': <encoded id>}``;
        ``no_add_to_history`` - if present (any value), inputs are not copied
        into the target history;
        ``history`` - name of a new history, or ``"hist_id=<encoded id>"`` to
        use an existing one;
        ``installed_repository_file`` - path of a workflow file to import
        (mutually exclusive with ``workflow_id``);
        ``replacement_params`` - optional dict used when renaming outputs.
    :returns: on success, a dict with the encoded target history id and the
        encoded ids of the output datasets; on failure, an error string with
        an HTTP error status set on ``trans.response``.
    """
    # Pull parameters out of payload.
    workflow_id = payload['workflow_id']
    param_map = payload.get('parameters', {})
    ds_map = payload['ds_map']
    add_to_history = 'no_add_to_history' not in payload
    history_param = payload['history']

    # Get/create workflow.
    if not workflow_id:
        # No id: the only supported alternative is importing a workflow file.
        if 'installed_repository_file' in payload:
            workflow_controller = trans.webapp.controllers['workflow']
            result = workflow_controller.import_workflow(trans=trans, cntrller='api', **payload)
            return result
        trans.response.status = 403
        return "Either workflow_id or installed_repository_file must be specified"
    if 'installed_repository_file' in payload:
        trans.response.status = 403
        return "installed_repository_file may not be specified with workflow_id"

    # Get workflow + accessibility check.  A non-owner may still run the
    # workflow if it has been explicitly shared with them.
    stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(
        trans.security.decode_id(workflow_id))
    if stored_workflow.user != trans.user and not trans.user_is_admin():
        if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(
                user=trans.user, stored_workflow=stored_workflow).count() == 0:
            trans.response.status = 400
            return "Workflow is not owned by or shared with current user"
    workflow = stored_workflow.latest_workflow

    # Get target history.
    if history_param.startswith('hist_id='):
        # Passing an existing history to use.
        history = trans.sa_session.query(self.app.model.History).get(
            trans.security.decode_id(history_param[8:]))
        if history.user != trans.user and not trans.user_is_admin():
            trans.response.status = 400
            return "Invalid History specified."
    else:
        # Send workflow outputs to a new history named by the caller.
        history = self.app.model.History(name=history_param, user=trans.user)
        trans.sa_session.add(history)
        trans.sa_session.flush()

    # Set workflow inputs.  Each entry is resolved to an HDA; library
    # datasets are copied into the target history first.
    for k in ds_map:
        try:
            if ds_map[k]['src'] == 'ldda':
                ldda = trans.sa_session.query(self.app.model.LibraryDatasetDatasetAssociation).get(
                    trans.security.decode_id(ds_map[k]['id']))
                assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), ldda.dataset)
                hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
            elif ds_map[k]['src'] == 'ld':
                ldda = trans.sa_session.query(self.app.model.LibraryDataset).get(
                    trans.security.decode_id(ds_map[k]['id'])).library_dataset_dataset_association
                assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), ldda.dataset)
                hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history)
            elif ds_map[k]['src'] == 'hda':
                # Get dataset handle, add to dict and history if necessary.
                hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get(
                    trans.security.decode_id(ds_map[k]['id']))
                assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset(
                    trans.get_current_user_roles(), hda.dataset)
            else:
                trans.response.status = 400
                return "Unknown dataset source '%s' specified." % ds_map[k]['src']
            if add_to_history and hda.history != history:
                hda = hda.copy()
                history.add_dataset(hda)
            ds_map[k]['hda'] = hda
        except AssertionError:
            # The access check above failed for this dataset.
            trans.response.status = 400
            return "Invalid Dataset '%s' Specified" % ds_map[k]['id']

    # Sanity checks.
    if not workflow:
        trans.response.status = 400
        return "Workflow not found."
    if len(workflow.steps) == 0:
        trans.response.status = 400
        return "Workflow cannot be run because it does not have any steps"
    if workflow.has_cycles:
        trans.response.status = 400
        return "Workflow cannot be run because it contains cycles"
    if workflow.has_errors:
        trans.response.status = 400
        return "Workflow cannot be run because of validation errors in some steps"

    # Build the state for each step.
    rval = {}
    for step in workflow.steps:
        input_connections_by_name = {}
        for conn in step.input_connections:
            input_name = conn.input_name
            if input_name not in input_connections_by_name:
                input_connections_by_name[input_name] = []
            input_connections_by_name[input_name].append(conn)
        step.input_connections_by_name = input_connections_by_name
        if step.type == 'tool' or step.type is None:
            step.module = module_factory.from_workflow_step(trans, step)
            # Check for missing parameters.
            step.upgrade_messages = step.module.check_and_update_state()
            # Any connected input needs to have value DummyDataset (these
            # are not persisted so we need to do it every time).
            step.module.add_dummy_datasets(connections=step.input_connections)
            step.state = step.module.state
            _update_step_parameters(step, param_map)
            if step.tool_errors:
                trans.response.status = 400
                # BUGFIX: previously interpolated an always-None local
                # (step_errors); report the actual per-step errors instead.
                return "Workflow cannot be run because of validation errors in some steps: %s" % step.tool_errors
            if step.upgrade_messages:
                trans.response.status = 400
                return "Workflow cannot be run because of step upgrade messages: %s" % step.upgrade_messages
        else:
            # This is an input step. Make sure we have an available input.
            if step.type == 'data_input' and str(step.id) not in ds_map:
                trans.response.status = 400
                return "Workflow cannot be run because an expected input step '%s' has no input dataset." % step.id
            step.module = module_factory.from_workflow_step(trans, step)
            step.state = step.module.get_runtime_state()

    # Run each step, connecting outputs to inputs.
    rval['history'] = trans.security.encode_id(history.id)
    rval['outputs'] = []
    replacement_dict = payload.get('replacement_params', {})
    # NOTE: removed a dead `outputs = util.odict.odict()` assignment that was
    # immediately overwritten by the invoke() result below.
    outputs = invoke(
        trans=trans,
        workflow=workflow,
        target_history=history,
        replacement_dict=replacement_dict,
        ds_map=ds_map,
    )
    trans.sa_session.flush()

    # Build legacy output - should probably include more information from
    # outputs.
    for step in workflow.steps:
        if step.type == 'tool' or step.type is None:
            for v in outputs[step.id].itervalues():
                rval['outputs'].append(trans.security.encode_id(v.id))
    return rval