def create_tool(self, trans, tool_payload, allow_load=True):
    """Create (or load an existing) dynamic tool from an API payload.

    :param trans: request transaction (unused here beyond the API signature)
    :param tool_payload: dict describing the tool; either a path reference
        (``src == "from_path"``) or an inline ``"representation"`` dict
    :param allow_load: when True, an existing tool with the same uuid is
        returned instead of raising
    :raises exceptions.ObjectAttributeMissingException: payload lacks a
        representation or the representation lacks ``class``
    :raises exceptions.ConfigDoesNotAllowException: beta tool formats are
        disabled in the Galaxy config
    :raises DuplicatedIdentifierException: uuid already exists and
        ``allow_load`` is False
    :returns: the persisted (or pre-existing) dynamic tool object
    """
    src = tool_payload.get("src", "representation")
    is_path = src == "from_path"
    if is_path:
        # Load format/representation from an on-disk artifact.
        # NOTE(review): this branch skips the enable_beta_tool_formats
        # config check performed below for inline representations —
        # confirm that is intentional.
        tool_format, representation, object_id = artifact_class(None, tool_payload)
    else:
        assert src == "representation"
        if "representation" not in tool_payload:
            raise exceptions.ObjectAttributeMissingException(
                "A tool 'representation' is required."
            )
        representation = tool_payload["representation"]
        if "class" not in representation:
            raise exceptions.ObjectAttributeMissingException(
                "Current tool representations require 'class'."
            )
        # Dynamic tool creation is gated behind a beta-feature config flag.
        enable_beta_formats = getattr(self.app.config, "enable_beta_tool_formats", False)
        if not enable_beta_formats:
            raise exceptions.ConfigDoesNotAllowException("Set 'enable_beta_tool_formats' in Galaxy config to create dynamic tools.")
        tool_format = representation["class"]
    tool_directory = tool_payload.get("tool_directory", None)
    # tool_path is never populated in this variant; it is passed through as
    # None to self.create().
    tool_path = None
    if tool_format == "GalaxyTool":
        # Fall back to a freshly generated uuid, and to the uuid as tool id.
        uuid = tool_payload.get("uuid", None)
        if uuid is None:
            uuid = uuid4()
        tool_id = representation.get("id", None)
        if tool_id is None:
            tool_id = str(uuid)
        tool_version = representation.get("version", None)
        value = representation
    else:
        # Only "GalaxyTool" is supported by this implementation.
        raise Exception("Unknown tool type encountered.")
    # TODO: enforce via DB constraint and catch appropriate
    # exception.
    existing_tool = self.get_tool_by_uuid(uuid)
    if existing_tool is not None and not allow_load:
        raise DuplicatedIdentifierException(existing_tool.id)
    elif existing_tool:
        # Reuse the already-persisted tool rather than creating a duplicate.
        dynamic_tool = existing_tool
    else:
        dynamic_tool = self.create(
            tool_format=tool_format,
            tool_id=tool_id,
            tool_version=tool_version,
            tool_path=tool_path,
            tool_directory=tool_directory,
            uuid=uuid,
            value=value,
        )
    # Make the tool available in the in-memory toolbox immediately.
    self.app.toolbox.load_dynamic_tool(dynamic_tool)
    return dynamic_tool
def create(self, trans, payload, **kwd):
    """
    create( self, trans, payload, **kwd )
    * POST /api/pages
        Create a page and return dictionary containing Page summary

    :param  payload: dictionary structure containing::
        'slug'       = The title slug for the page URL, must be unique
        'title'      = Title of the page
        'content'    = HTML contents of the page
        'annotation' = Annotation that will be attached to the page

    :rtype:     dict
    :returns:   Dictionary return of the Page.to_dict call

    :raises exceptions.ObjectAttributeMissingException: title or slug missing
    :raises exceptions.ObjectAttributeInvalidException: slug is malformed
    :raises exceptions.DuplicatedSlugException: slug already used by this user
    """
    user = trans.get_user()
    # Validate required attributes before touching the database.
    if not payload.get("title", None):
        raise exceptions.ObjectAttributeMissingException("Page name is required")
    elif not payload.get("slug", None):
        raise exceptions.ObjectAttributeMissingException("Page id is required")
    elif not self._is_valid_slug(payload["slug"]):
        raise exceptions.ObjectAttributeInvalidException(
            "Page identifier must consist of only lowercase letters, numbers, and the '-' character"
        )
    elif trans.sa_session.query(trans.app.model.Page).filter_by(
            user=user, slug=payload["slug"], deleted=False).first():
        raise exceptions.DuplicatedSlugException("Page slug must be unique")
    # Sanitize user-supplied HTML before persisting it.
    content = payload.get("content", "")
    content = sanitize_html(content, 'utf-8', 'text/html')
    # Create the new stored page
    page = trans.app.model.Page()
    page.title = payload['title']
    page.slug = payload['slug']
    page_annotation = sanitize_html(payload.get("annotation", ""), 'utf-8', 'text/html')
    # Fixed: reuse the `user` local instead of a redundant second
    # trans.get_user() call.
    self.add_item_annotation(trans.sa_session, user, page, page_annotation)
    page.user = user
    # And the first (empty) page revision
    page_revision = trans.app.model.PageRevision()
    page_revision.title = payload['title']
    page_revision.page = page
    page.latest_revision = page_revision
    page_revision.content = content
    # Persist
    session = trans.sa_session
    session.add(page)
    session.flush()
    return self.encode_all_ids(trans, page.to_dict(), True)
def search(self, trans, payload, **kwd):
    """
    search( trans, payload )
    * POST /api/jobs/search:
        return jobs for current user

    :type   payload: dict
    :param  payload: Dictionary containing description of requested job.
        This is in the same format as a request to POST /apt/tools would
        take to initiate a job

    :rtype:     list
    :returns:   list of dictionaries containing summary job information of
        the jobs that match the requested job run

    This method is designed to scan the list of previously run jobs and find
    records of jobs that had the exact some input parameters and datasets.
    This can be used to minimize the amount of repeated work, and simply
    recycle the old results.
    """
    tool_id = payload.get('tool_id')
    if tool_id is None:
        raise exceptions.ObjectAttributeMissingException("No tool id")
    tool = trans.app.toolbox.get_tool(tool_id)
    if tool is None:
        raise exceptions.ObjectNotFound("Requested tool not found")
    if 'inputs' not in payload:
        raise exceptions.ObjectAttributeMissingException("No inputs defined")
    inputs = payload.get('inputs', {})
    # Find files coming in as multipart file data and add to inputs.
    for k, v in payload.items():
        if k.startswith('files_') or k.startswith('__files_'):
            inputs[k] = v
    request_context = WorkRequestContext(app=trans.app, user=trans.user, history=trans.history)
    # Expand the incoming parameters the same way tool execution would, so
    # the comparison against stored jobs uses canonical parameter values.
    all_params, all_errors, _, _ = tool.expand_incoming(
        trans=trans, incoming=inputs, request_context=request_context)
    if any(all_errors):
        # Malformed request parameters cannot match any previous job.
        return []
    params_dump = [
        tool.params_to_strings(param, self.app, nested=True)
        for param in all_params
    ]
    jobs = []
    # One expansion per job that would be created; search for each.
    for param_dump, param in zip(params_dump, all_params):
        job = self.job_search.by_tool_input(trans=trans,
                                            tool_id=tool_id,
                                            tool_version=tool.version,
                                            param=param,
                                            param_dump=param_dump,
                                            job_state=payload.get('state'))
        if job:
            jobs.append(job)
    return [
        self.encode_all_ids(trans, single_job.to_dict('element'), True)
        for single_job in jobs
    ]
def create(self, trans, payload):
    """Create a new Page (plus its first revision) from an API payload.

    The payload must include a unique ``slug`` and a ``title``. Content may
    come either from an explicit ``content``/``content_format`` pair or be
    generated from a workflow invocation report when ``invocation_id`` is
    supplied.

    :returns: the persisted Page model object
    :raises exceptions.ObjectAttributeMissingException: title or slug missing
    :raises exceptions.ObjectAttributeInvalidException: slug is malformed
    :raises exceptions.DuplicatedSlugException: slug already used by this user
    """
    user = trans.get_user()
    # Validate required attributes before touching the database.
    if not payload.get("title"):
        raise exceptions.ObjectAttributeMissingException("Page name is required")
    elif not payload.get("slug"):
        raise exceptions.ObjectAttributeMissingException("Page id is required")
    elif not base.is_valid_slug(payload["slug"]):
        raise exceptions.ObjectAttributeInvalidException(
            "Page identifier must consist of only lowercase letters, numbers, and the '-' character"
        )
    elif trans.sa_session.query(trans.app.model.Page).filter_by(
            user=user, slug=payload["slug"], deleted=False).first():
        raise exceptions.DuplicatedSlugException("Page identifier must be unique")
    if payload.get("invocation_id"):
        # Derive the page content from a workflow invocation report
        # (always markdown) instead of the payload's content.
        invocation_id = payload.get("invocation_id")
        invocation_report = self.workflow_manager.get_invocation_report(trans, invocation_id)
        content = invocation_report.get("markdown")
        content_format = "markdown"
    else:
        content = payload.get("content", "")
        content_format = payload.get("content_format", "html")
    content = self.rewrite_content_for_import(trans, content, content_format)
    # Create the new stored page
    page = trans.app.model.Page()
    page.title = payload['title']
    page.slug = payload['slug']
    page_annotation = payload.get("annotation", None)
    if page_annotation is not None:
        # Annotations are user-supplied HTML; sanitize before storing.
        page_annotation = sanitize_html(page_annotation)
        self.add_item_annotation(trans.sa_session, trans.get_user(), page, page_annotation)
    page.user = user
    # And the first (empty) page revision
    page_revision = trans.app.model.PageRevision()
    page_revision.title = payload['title']
    page_revision.page = page
    page.latest_revision = page_revision
    page_revision.content = content
    page_revision.content_format = content_format
    # Persist
    session = trans.sa_session
    session.add(page)
    session.flush()
    return page
def save_new_revision(self, trans, page, payload):
    """Attach a new revision to ``page`` and flush it to the database.

    Security is assumed to have been verified by the caller. Accepts
    optional ``title`` and ``content_format`` overrides in the payload;
    missing values fall back to the page's current state.
    """
    new_content = payload.get("content")
    if not new_content:
        raise exceptions.ObjectAttributeMissingException("content undefined or empty")

    new_format = payload.get("content_format")
    valid_formats = (None, PageContentFormat.html.value, PageContentFormat.markdown.value)
    if new_format not in valid_formats:
        raise exceptions.RequestParameterInvalidException(
            f"content_format [{new_format}], if specified, must be either html or markdown"
        )

    new_title = payload.get('title', page.title)
    if new_format is None:
        # Inherit the format of the page's current latest revision.
        new_format = page.latest_revision.content_format
    new_content = self.rewrite_content_for_import(trans, new_content, content_format=new_format)

    revision = trans.app.model.PageRevision()
    revision.title = new_title
    revision.page = page
    page.latest_revision = revision
    revision.content = new_content
    revision.content_format = new_format

    # Persist
    trans.sa_session.flush()
    return revision
def import_shared_workflow_deprecated(self, trans, payload, **kwd):
    """
    POST /api/workflows/import

    Import a workflow shared by other users.

    :param workflow_id: the workflow id (required)
    :type workflow_id: str

    :rtype: dict
    :returns: dictionary describing the imported workflow

    :raises: exceptions.MessageException, exceptions.ObjectNotFound
    """
    # Pull parameters out of payload.
    workflow_id = payload.get('workflow_id', None)
    if workflow_id is None:
        raise exceptions.ObjectAttributeMissingException("Missing required parameter 'workflow_id'.")
    # Fixed: propagate the helper's result to the API caller — previously
    # the value was computed and discarded, so this endpoint always
    # returned None.
    return self.__api_import_shared_workflow(trans, workflow_id, payload)
def api_payload_to_create_params(payload):
    """
    Cleanup API payload to pass into dataset_collections.
    """
    missing_parameters = [
        name for name in ("collection_type", "element_identifiers")
        if name not in payload
    ]
    if missing_parameters:
        raise exceptions.ObjectAttributeMissingException(
            "Missing required parameters %s" % missing_parameters
        )
    return {
        "collection_type": payload.get("collection_type"),
        "element_identifiers": payload.get("element_identifiers"),
        "name": payload.get("name"),
    }
def __authorize_job_access(self, trans, encoded_job_id, **kwargs):
    """Validate a job-files request and return the targeted Job.

    Requires ``path`` and ``job_key`` in kwargs, checks the supplied
    job_key against the expected one, and refuses access to finished jobs.
    """
    for required in ("path", "job_key"):
        if required in kwargs:
            continue
        raise exceptions.ObjectAttributeMissingException(
            f"Job files action requires a valid '{required}'."
        )
    decoded_id = trans.security.decode_id(encoded_job_id)
    expected_key = trans.security.encode_id(decoded_id, kind="jobs_files")
    # Compare with safe_str_cmp (presumably a timing-safe comparison).
    if not util.safe_str_cmp(kwargs["job_key"], expected_key):
        raise exceptions.ItemAccessibilityException("Invalid job_key supplied.")
    # Verify job is active. Don't update the contents of complete jobs.
    job = trans.sa_session.query(model.Job).get(decoded_id)
    if job.finished:
        raise exceptions.ItemAccessibilityException(
            "Attempting to read or modify the files of a job that has already completed."
        )
    return job
def api_payload_to_create_params(payload):
    """
    Cleanup API payload to pass into dataset_collections.
    """
    missing_parameters = [
        name for name in ("collection_type", "element_identifiers")
        if name not in payload
    ]
    if missing_parameters:
        raise exceptions.ObjectAttributeMissingException(
            f"Missing required parameters {missing_parameters}"
        )
    return {
        "collection_type": payload.get("collection_type"),
        "element_identifiers": payload.get("element_identifiers"),
        "name": payload.get("name"),
        "hide_source_items": string_as_bool(payload.get("hide_source_items", False)),
        "copy_elements": string_as_bool(payload.get("copy_elements", False)),
    }
def __authorize_job_access(self, encoded_job_id, **kwargs):
    """Validate a job-files request and return the targeted Job.

    Requires a ``job_key`` kwarg; the key is recomputed from the decoded
    job id and compared with util.safe_str_cmp (presumably a timing-safe
    comparison). Only running jobs pass the final state check.
    """
    key = "job_key"
    if key not in kwargs:
        error_message = "Job files action requires a valid '%s'." % key
        raise exceptions.ObjectAttributeMissingException(error_message)
    job_id = self._security.decode_id(encoded_job_id)
    job_key = self._security.encode_id(job_id, kind="jobs_files")
    if not util.safe_str_cmp(kwargs["job_key"], job_key):
        raise exceptions.ItemAccessibilityException("Invalid job_key supplied.")
    # Verify job is active. Don't update the contents of complete jobs.
    sa_session = self._app.model.context.current
    job = sa_session.query(model.Job).get(job_id)
    # NOTE(review): this rejects ANY non-running job (including new/queued)
    # but the error text claims the job "has already completed" — confirm
    # whether queued jobs should be allowed here; a sibling implementation
    # in this file checks job.finished instead.
    if not job.running:
        error_message = "Attempting to read or modify the files of a job that has already completed."
        raise exceptions.ItemAccessibilityException(error_message)
    return job
def create(self, trans, page_id, payload, **kwd):
    """
    create( self, trans, page_id, payload **kwd )
    * POST /api/pages/{page_id}/revisions
        Create a new revision for a page

    :param page_id: Add revision to Page with ID=page_id
    :param payload: A dictionary containing::
        'title'     = New title of the page
        'content'   = New content of the page

    :rtype:     dictionary
    :returns:   Dictionary with 'success' or 'error' element to indicate
        the result of the request
    """
    new_content = payload.get("content")
    if not new_content:
        raise exceptions.ObjectAttributeMissingException("content undefined or empty")
    page = self._get_page(trans, page_id)
    self._verify_page_ownership(trans, page)
    # Keep the existing title unless the payload supplies a replacement.
    new_title = payload.get('title', page.title)
    new_content = sanitize_html(new_content, 'utf-8', 'text/html')
    revision = trans.app.model.PageRevision()
    revision.title = new_title
    revision.page = page
    page.latest_revision = revision
    revision.content = new_content
    # Persist the new revision.
    trans.sa_session.flush()
    return revision.to_dict(view="element")
def save_new_revision(self, trans, page, payload):
    """Attach a new revision to ``page`` and flush it to the database.

    Caller is responsible for all security checks. ``title`` falls back to
    the page's current title when absent from the payload.
    """
    new_content = payload.get("content")
    if not new_content:
        raise exceptions.ObjectAttributeMissingException("content undefined or empty")
    new_title = payload.get('title', page.title)
    new_content = self.rewrite_content_for_import(trans, new_content)
    revision = trans.app.model.PageRevision()
    revision.title = new_title
    revision.page = page
    page.latest_revision = revision
    revision.content = new_content
    # Persist
    trans.sa_session.flush()
    return revision
def import_shared_workflow(self, trans, payload, **kwd):
    """
    POST /api/workflows/import

    Import a workflow shared by other users.

    :param workflow_id: the workflow id (required)
    :type workflow_id: str

    :rtype: dict
    :returns: dictionary describing the imported workflow (includes a
        'url' entry pointing at the workflow)

    :raises: exceptions.MessageException, exceptions.ObjectNotFound
    """
    # Pull parameters out of payload.
    workflow_id = payload.get('workflow_id', None)
    if workflow_id is None:
        raise exceptions.ObjectAttributeMissingException(
            "Missing required parameter 'workflow_id'.")
    try:
        stored_workflow = self.get_stored_workflow(trans, workflow_id, check_ownership=False)
    except Exception:
        # Fixed: narrowed the bare ``except:`` — it also swallowed
        # SystemExit/KeyboardInterrupt.
        raise exceptions.ObjectNotFound(
            "Malformed workflow id ( %s ) specified." % workflow_id)
    if stored_workflow.importable is False:
        raise exceptions.MessageException(
            'The owner of this workflow has disabled imports via this link.'
        )
    elif stored_workflow.deleted:
        raise exceptions.MessageException(
            "You can't import this workflow because it has been deleted.")
    imported_workflow = self._import_shared_workflow(trans, stored_workflow)
    item = imported_workflow.to_dict(value_mapper={'id': trans.security.encode_id})
    encoded_id = trans.security.encode_id(imported_workflow.id)
    item['url'] = url_for('workflow', id=encoded_id)
    return item
def set_information(self, trans, id, payload=None, **kwd):
    """
    PUT /api/users/{id}/information/inputs
    Save a user's email, username, addresses etc.

    :param id: the encoded id of the user
    :type id: str

    :param payload: data with new settings
    :type payload: dict

    :rtype: dict
    :returns: a message confirming the information was saved

    :raises exceptions.RequestParameterInvalidException: invalid email/username
    :raises exceptions.ObjectAttributeMissingException: required form field empty
    :raises exceptions.InternalServerError: activation email could not be sent
    """
    # Fixed: mutable default argument ({}) replaced with None sentinel.
    payload = payload or {}
    user = self._get_user(trans, id)
    # Update email
    if 'email' in payload:
        email = payload.get('email')
        message = self._validate_email(email) or validate_email(trans, email, user)
        if message:
            raise exceptions.RequestParameterInvalidException(message)
        if user.email != email:
            # Update user email and user's private role name which must match
            private_role = trans.app.security_agent.get_private_user_role(user)
            private_role.name = email
            private_role.description = 'Private role for ' + email
            user.email = email
            trans.sa_session.add(user)
            trans.sa_session.add(private_role)
            trans.sa_session.flush()
            if trans.app.config.user_activation_on:
                # Deactivate the user if email was changed and activation is on.
                user.active = False
                if self.user_manager.send_activation_email(trans, user.email, user.username):
                    message = 'The login information has been updated with the changes.<br>Verification email has been sent to your new email address. Please verify it by clicking the activation link in the email.<br>Please check your spam/trash folder in case you cannot find the message.'
                else:
                    message = 'Unable to send activation email, please contact your local Galaxy administrator.'
                    if trans.app.config.error_email_to is not None:
                        message += ' Contact: %s' % trans.app.config.error_email_to
                    raise exceptions.InternalServerError(message)
    # Update public name
    if 'username' in payload:
        username = payload.get('username')
        message = self._validate_publicname(username) or validate_publicname(trans, username, user)
        if message:
            raise exceptions.RequestParameterInvalidException(message)
        if user.username != username:
            user.username = username
    # Update user custom form
    user_info_form_id = payload.get('info|form_id')
    if user_info_form_id:
        prefix = 'info|'
        user_info_form = trans.sa_session.query(
            trans.app.model.FormDefinition).get(
                trans.security.decode_id(user_info_form_id))
        # Collect every 'info|' prefixed payload entry into the form values.
        user_info_values = {}
        for item in payload:
            if item.startswith(prefix):
                user_info_values[item[len(prefix):]] = payload[item]
        form_values = trans.model.FormValues(user_info_form, user_info_values)
        trans.sa_session.add(form_values)
        user.values = form_values
    # Update values for extra user preference items
    extra_user_pref_data = dict()
    extra_pref_keys = self._get_extra_user_preferences(trans)
    if extra_pref_keys is not None:
        for key in extra_pref_keys:
            key_prefix = key + '|'
            for item in payload:
                if item.startswith(key_prefix):
                    # Show error message if the required field is empty
                    if payload[item] == "":
                        # Raise an exception when a required field is empty while saving the form
                        keys = item.split("|")
                        section = extra_pref_keys[keys[0]]
                        # Fixed: renamed loop variable `input` (shadowed the builtin).
                        for form_input in section['inputs']:
                            if form_input['name'] == keys[1] and form_input['required']:
                                raise exceptions.ObjectAttributeMissingException(
                                    "Please fill the required field")
                    extra_user_pref_data[item] = payload[item]
        user.preferences["extra_user_preferences"] = json.dumps(extra_user_pref_data)
    # Update user addresses
    # Payload keys look like 'address_<index>|<attribute>'; group them per index.
    address_dicts = {}
    address_count = 0
    for item in payload:
        match = re.match(r'^address_(?P<index>\d+)\|(?P<attribute>\S+)', item)
        if match:
            groups = match.groupdict()
            index = int(groups['index'])
            attribute = groups['attribute']
            address_dicts[index] = address_dicts.get(index) or {}
            address_dicts[index][attribute] = payload[item]
            address_count = max(address_count, index + 1)
    user.addresses = []
    for index in range(0, address_count):
        d = address_dicts[index]
        if d.get('id'):
            # Existing address — look it up by its encoded id.
            try:
                user_address = trans.sa_session.query(
                    trans.app.model.UserAddress).get(
                        trans.security.decode_id(d['id']))
            except Exception as e:
                raise exceptions.ObjectNotFound(
                    'Failed to access user address ({}). {}'.format(d['id'], e))
        else:
            user_address = trans.model.UserAddress()
            trans.log_event('User address added')
        for field in AddressField.fields():
            if str(field[2]).lower() == 'required' and not d.get(field[0]):
                raise exceptions.ObjectAttributeMissingException(
                    'Address {}: {} ({}) required.'.format(
                        index + 1, field[1], field[0]))
            setattr(user_address, field[0], str(d.get(field[0], '')))
        user_address.user = user
        user.addresses.append(user_address)
        trans.sa_session.add(user_address)
    trans.sa_session.add(user)
    trans.sa_session.flush()
    trans.log_event('User information added')
    return {'message': 'User information has been saved.'}
def search(self, trans, payload, **kwd):
    """
    search( trans, payload )
    * POST /api/jobs/search:
        return jobs for current user

    :type   payload: dict
    :param  payload: Dictionary containing description of requested job.
        This is in the same format as a request to POST /apt/tools would
        take to initiate a job

    :rtype:     list
    :returns:   list of dictionaries containing summary job information of
        the jobs that match the requested job run

    This method is designed to scan the list of previously run jobs and find
    records of jobs that had the exact some input parameters and datasets.
    This can be used to minimize the amount of repeated work, and simply
    recycle the old results.
    """
    tool_id = None
    if 'tool_id' in payload:
        tool_id = payload.get('tool_id')
    if tool_id is None:
        raise exceptions.ObjectAttributeMissingException("No tool id")
    tool = trans.app.toolbox.get_tool(tool_id)
    if tool is None:
        raise exceptions.ObjectNotFound("Requested tool not found")
    if 'inputs' not in payload:
        raise exceptions.ObjectAttributeMissingException("No inputs defined")
    inputs = payload['inputs']
    # Split the inputs into dataset references (matched via the underlying
    # dataset id) and plain parameters (matched via JobParameter rows).
    input_data = {}
    input_param = {}
    for k, v in inputs.items():
        if isinstance(v, dict):
            if 'id' in v:
                if 'src' not in v or v['src'] == 'hda':
                    hda_id = self.decode_id(v['id'])
                    dataset = self.hda_manager.get_accessible(hda_id, trans.user)
                else:
                    dataset = self.get_library_dataset_dataset_association(trans, v['id'])
                if dataset is None:
                    raise exceptions.ObjectNotFound("Dataset %s not found" % (v['id']))
                input_data[k] = dataset.dataset_id
        else:
            input_param[k] = json.dumps(str(v))
    query = trans.sa_session.query(trans.app.model.Job).filter(
        trans.app.model.Job.tool_id == tool_id,
        trans.app.model.Job.user == trans.user)
    if 'state' not in payload:
        # Default to active-or-successful jobs.
        # Fixed: the 'running' condition was listed twice in this or_().
        query = query.filter(
            or_(
                trans.app.model.Job.state == 'running',
                trans.app.model.Job.state == 'queued',
                trans.app.model.Job.state == 'waiting',
                trans.app.model.Job.state == 'ok',
            ))
    else:
        if isinstance(payload['state'], string_types):
            query = query.filter(trans.app.model.Job.state == payload['state'])
        elif isinstance(payload['state'], list):
            o = []
            for s in payload['state']:
                o.append(trans.app.model.Job.state == s)
            query = query.filter(or_(*o))
    for k, v in input_param.items():
        a = aliased(trans.app.model.JobParameter)
        query = query.filter(
            and_(trans.app.model.Job.id == a.job_id, a.name == k, a.value == v))
    for k, v in input_data.items():
        # Here we are attempting to link the inputs to the underlying
        # dataset (not the dataset association).
        # This way, if the calculation was done using a copied HDA
        # (copied from the library or another history), the search will
        # still find the job
        a = aliased(trans.app.model.JobToInputDatasetAssociation)
        b = aliased(trans.app.model.HistoryDatasetAssociation)
        query = query.filter(
            and_(trans.app.model.Job.id == a.job_id,
                 a.dataset_id == b.id,
                 b.deleted == false(),
                 b.dataset_id == v))
    out = []
    for job in query.all():
        # check to make sure none of the output files have been deleted
        # Fixed: dropped the unnecessary list() wrapper around the generator.
        if all(a.dataset.deleted is False for a in job.output_datasets):
            out.append(self.encode_all_ids(trans, job.to_dict('element'), True))
    return out
def create_tool(self, trans, tool_payload, allow_load=True):
    """Create (or load an existing) dynamic tool from an API payload.

    Supports Galaxy tools ("GalaxyTool") and CWL tools ("CommandLineTool",
    "ExpressionTool"), sourced either from a path (``src == "from_path"``)
    or an inline ``"representation"`` dict.

    :param allow_load: when True, an existing tool with the same uuid is
        returned instead of raising
    :raises exceptions.ConfigDoesNotAllowException: beta tool formats are
        disabled in the Galaxy config
    :raises DuplicatedIdentifierException: uuid already exists and
        ``allow_load`` is False
    :raises exceptions.ObjectAttributeMissingException: missing
        representation or representation class
    :returns: the persisted (or pre-existing) dynamic tool object
    """
    # Dynamic tool creation is gated behind a beta-feature config flag.
    if not getattr(self.app.config, "enable_beta_tool_formats", False):
        raise exceptions.ConfigDoesNotAllowException("Set 'enable_beta_tool_formats' in Galaxy config to create dynamic tools.")

    dynamic_tool = None
    uuid_str = tool_payload.get("uuid")
    # Convert uuid_str to UUID or generate new if None
    uuid = model.get_uuid(uuid_str)
    if uuid_str:
        # TODO: enforce via DB constraint and catch appropriate
        # exception.
        dynamic_tool = self.get_tool_by_uuid(uuid_str)
        if dynamic_tool:
            if not allow_load:
                raise DuplicatedIdentifierException(dynamic_tool.id)
            assert dynamic_tool.uuid == uuid
    if not dynamic_tool:
        src = tool_payload.get("src", "representation")
        is_path = src == "from_path"

        if is_path:
            # Load format/representation from an on-disk artifact.
            tool_format, representation, _ = artifact_class(None, tool_payload)
        else:
            assert src == "representation"
            representation = tool_payload.get("representation")
            if not representation:
                raise exceptions.ObjectAttributeMissingException(
                    "A tool 'representation' is required."
                )

            tool_format = representation.get("class")
            if not tool_format:
                raise exceptions.ObjectAttributeMissingException(
                    "Current tool representations require 'class'."
                )

        tool_path = tool_payload.get("path")
        tool_directory = tool_payload.get("tool_directory")
        if tool_format == "GalaxyTool":
            tool_id = representation.get("id")
            if not tool_id:
                # Fall back to the uuid as the tool id.
                tool_id = str(uuid)
        elif tool_format in ("CommandLineTool", "ExpressionTool"):
            # CWL tools
            if is_path:
                proxy = tool_proxy(tool_path=tool_path, uuid=uuid)
            else:
                # Build a tool proxy so that we can convert to the persistable
                # hash.
                proxy = tool_proxy(
                    tool_object=representation["raw_process_reference"],
                    tool_directory=tool_directory,
                    uuid=uuid,
                )
            tool_id = proxy.galaxy_id()
        else:
            raise Exception(f"Unknown tool format [{tool_format}] encountered.")
        tool_version = representation.get("version")
        dynamic_tool = self.create(
            tool_format=tool_format,
            tool_id=tool_id,
            tool_version=tool_version,
            tool_path=tool_path,
            tool_directory=tool_directory,
            uuid=uuid,
            value=representation,
        )
    # Make the tool available in the in-memory toolbox immediately.
    self.app.toolbox.load_dynamic_tool(dynamic_tool)
    return dynamic_tool