def get_permissions(self, trans, encoded_folder_id, scope=None, page=None, page_limit=None, query=None):
    """
    Load all permissions for the given folder id and return it.

    :param  encoded_folder_id:      the encoded id of the folder
    :type   encoded_folder_id:      an encoded id string
    :param  scope:                  either 'current' or 'available'
    :type   scope:                  string
    :param  page:                   1-based page of available roles (defaults to 1)
    :type   page:                   int or string castable to int
    :param  page_limit:             number of roles per page (defaults to 10)
    :type   page_limit:             int or string castable to int
    :param  query:                  optional filter passed to the role lookup
    :type   query:                  string

    :returns:   dictionary with all applicable permissions' values
    :rtype:     dictionary

    :raises: InsufficientPermissionsException, RequestParameterInvalidException
    """
    current_user_roles = trans.get_current_user_roles()
    is_admin = trans.user_is_admin
    decoded_folder_id = self.folder_manager.cut_and_decode(trans, encoded_folder_id)
    folder = self.folder_manager.get(trans, decoded_folder_id)
    # Only admins or users who can manage this library item may inspect its permissions.
    if not (is_admin or trans.app.security_agent.can_manage_library_item(current_user_roles, folder)):
        raise InsufficientPermissionsException('You do not have proper permission to access permissions of this folder.')
    if scope == 'current' or scope is None:
        return self.folder_manager.get_current_roles(trans, folder)
    # Return roles that are available to select.
    elif scope == 'available':
        # Paging parameters may arrive as strings; normalize with defaults.
        page = int(page) if page is not None else 1
        page_limit = int(page_limit) if page_limit is not None else 10
        roles, total_roles = trans.app.security_agent.get_valid_roles(trans, folder, query, page, page_limit)
        return_roles = [dict(id=trans.security.encode_id(role.id), name=role.name, type=role.type) for role in roles]
        return dict(roles=return_roles, page=page, page_limit=page_limit, total=total_roles)
    else:
        # Fixed typo in the user-facing message ("Alllowed" -> "Allowed"),
        # matching the spelling used by the typed variant of this method.
        raise RequestParameterInvalidException("The value of 'scope' parameter is invalid. Allowed values: current, available")
def get(self, trans, decoded_folder_id, check_manageable=False, check_accessible=True):
    """
    Get the folder from the DB.

    :param  decoded_folder_id:       decoded folder id
    :type   decoded_folder_id:       int
    :param  check_manageable:        flag whether the check that user can manage item
    :type   check_manageable:        bool
    :param  check_accessible:        flag whether to check that user can access item
    :type   check_accessible:        bool

    :returns:   the requested folder
    :rtype:     LibraryFolder

    :raises: InconsistentDatabase, RequestParameterInvalidException, InternalServerError
    """
    try:
        folder = trans.sa_session.query(trans.app.model.LibraryFolder).filter(trans.app.model.LibraryFolder.table.c.id == decoded_folder_id).one()
    except MultipleResultsFound:
        # id should be unique, so duplicates indicate a corrupted database.
        raise InconsistentDatabase('Multiple folders found with the same id.')
    except NoResultFound:
        raise RequestParameterInvalidException('No folder found with the id provided.')
    except Exception as e:
        raise InternalServerError('Error loading from the database.' + unicodify(e))
    # Apply the requested manage/access security checks before returning.
    folder = self.secure(trans, folder, check_manageable, check_accessible)
    return folder
def get(self, trans, decoded_group_id=None, name=None):
    """
    Get the group from the DB based on its ID or name.

    :param  decoded_group_id:       decoded group id
    :type   decoded_group_id:       int
    :param  name:                   name of the group
    :type   name:                   string

    :returns:   the requested group
    :rtype:     Group

    :raises: RequestParameterInvalidException, InconsistentDatabase,
        ObjectNotFound, InternalServerError
    """
    if decoded_group_id is None and name is None:
        raise RequestParameterInvalidException('You must supply either ID or a name of the group.')
    # Prefer lookup by id when it was supplied. Use an explicit `is not None`
    # test so a falsy id (e.g. 0) is not silently treated as a name lookup,
    # and build only the query that will actually be executed.
    if decoded_group_id is not None:
        query = trans.sa_session.query(trans.app.model.Group).filter(
            trans.app.model.Group.table.c.id == decoded_group_id)
    else:
        query = trans.sa_session.query(trans.app.model.Group).filter(
            trans.app.model.Group.table.c.name == name)
    try:
        group = query.one()
    except MultipleResultsFound:
        raise InconsistentDatabase('Multiple groups found with the same identifier.')
    except NoResultFound:
        raise ObjectNotFound('No group found with the identifier provided.')
    except Exception:
        raise InternalServerError('Error loading from the database.')
    return group
def _apply_disconnect(self, action: DisconnectAction, execution: RefactorActionExecution):
    """Remove one specific connection (identified by the action's input and
    output references) from the workflow dictionary being refactored.

    :raises: RequestParameterInvalidException when the connection does not exist
    """
    input_step_dict, input_name, output_step_dict, output_name = self._connection(action)
    output_order_index = output_step_dict["id"]  # wish this was order_index...
    # default name is name used for input's output terminal - following
    # format2 convention of allowing this be absent for clean references
    # to workflow inputs.
    all_input_connections = input_step_dict.get("input_connections")
    self.normalize_input_connections_to_list(all_input_connections, input_name)
    input_connections = all_input_connections[input_name]
    # multiple outputs attached to this inputs, just detach
    # that specific one.
    delete_index = None
    for connection_index, output in enumerate(input_connections):
        if output["id"] == output_order_index and output["output_name"] == output_name:
            delete_index = connection_index
            break
    if delete_index is None:
        raise RequestParameterInvalidException("Failed to locate connection to disconnect")
    del input_connections[delete_index]
def create_role(self, trans: ProvidesUserContext, role_definition_model) -> model.Role:
    """Create a new role from the given definition model and associate the
    requested users and groups with it.

    :param role_definition_model: object with name, description, user_ids, group_ids
    :returns: the newly created Role
    :raises: RequestParameterInvalidException if a role with the same name exists
    """
    name = role_definition_model.name
    description = role_definition_model.description
    user_ids = role_definition_model.user_ids or []
    group_ids = role_definition_model.group_ids or []
    # Role names must be unique.
    if trans.sa_session.query(Role).filter(Role.name == name).first():
        raise RequestParameterInvalidException(f"A role with that name already exists [{name}]")
    role_type = Role.types.ADMIN  # TODO: allow non-admins to create roles
    role = Role(name=name, description=description, type=role_type)
    trans.sa_session.add(role)
    users = [trans.sa_session.query(model.User).get(trans.security.decode_id(i)) for i in user_ids]
    groups = [trans.sa_session.query(model.Group).get(trans.security.decode_id(i)) for i in group_ids]
    # Create the UserRoleAssociations
    for user in users:
        trans.app.security_agent.associate_user_role(user, role)
    # Create the GroupRoleAssociations
    for group in groups:
        trans.app.security_agent.associate_group_role(group, role)
    trans.sa_session.flush()
    return role
def fetch_job_states(app, sa_session, job_source_ids, job_source_types):
    """Return a list of job-state summaries, one per (job_source_id,
    job_source_type) pair, preserving the input order.

    Each source type must be either "Job" or "ImplicitCollectionJobs".

    :raises: RequestParameterInvalidException for any other source type
    """
    decode = app.security.decode_id
    assert len(job_source_ids) == len(job_source_types)
    job_ids = set()
    implicit_collection_job_ids = set()
    # First pass only validates source types; the collected sets are not used yet.
    for job_source_id, job_source_type in zip(job_source_ids, job_source_types):
        if job_source_type == "Job":
            job_ids.add(job_source_id)
        elif job_source_type == "ImplicitCollectionJobs":
            implicit_collection_job_ids.add(job_source_id)
        else:
            raise RequestParameterInvalidException("Invalid job source type %s found." % job_source_type)

    # TODO: use above sets and optimize queries on second pass.
    rval = []
    for job_source_id, job_source_type in zip(job_source_ids, job_source_types):
        if job_source_type == "Job":
            rval.append(summarize_jobs_to_dict(sa_session, sa_session.query(model.Job).get(decode(job_source_id))))
        else:
            rval.append(summarize_jobs_to_dict(sa_session, sa_session.query(model.ImplicitCollectionJobs).get(decode(job_source_id))))

    return rval
def _handle_invalid_link_data_only_elements_type(item):
    """Reject items that combine ``link_data_only`` with a transient
    ``elements_from`` source, since linked data cannot be derived from it."""
    if not item.get("link_data_only", False):
        return
    elements_from = item.get("elements_from", False)
    if elements_from in ELEMENTS_FROM_TRANSIENT_TYPES:
        raise RequestParameterInvalidException(
            "link_data_only is invalid for derived elements from [%s]" % item.get("elements_from"))
def index(self, trans, shed, owner, repo, tool, version, image_file):
    """
    Open an image file that is contained in an installed tool shed repository
    or that is referenced by a URL for display.  The image can be defined in
    either a README.rst file contained in the repository or the help section
    of a Galaxy tool config that is contained in the repository.  The
    following image definitions are all supported.  The former $PATH_TO_IMAGES
    is no longer required, and is now ignored.
    .. image:: https://raw.github.com/galaxy/some_image.png
    .. image:: $PATH_TO_IMAGES/some_image.png
    .. image:: /static/images/some_image.gif
    .. image:: some_image.jpg
    .. image:: /deep/some_image.png

    Returns an open binary file handle for the image, or implicitly None
    when the resolved path does not exist.

    :raises: RequestParameterInvalidException when the path escapes the repository
    """
    guid = '/'.join([shed, 'repos', owner, repo, tool, version])
    # NOTE: rebinds the `tool` parameter to the resolved tool object.
    tool = trans.app.toolbox.get_tool(guid)
    repo_path = tool._repository_dir
    # Image may be named relative to static/images or with that prefix included.
    if 'static/images' not in image_file:
        path = join(repo_path, 'static', 'images', image_file)
    else:
        path = join(repo_path, image_file)
    # Reject paths escaping the repository directory (e.g. via '..').
    if not safe_contains(os.path.abspath(repo_path), os.path.abspath(path)):
        raise RequestParameterInvalidException()
    ext = os.path.splitext(image_file)[-1].lstrip('.')
    if ext:
        mime = trans.app.datatypes_registry.get_mimetype_by_extension(ext)
        if mime:
            trans.response.set_content_type(mime)
    if os.path.exists(path):
        return open(path, 'rb')
def validate_server_directory_upload(trans, server_dir):
    """Validate a server-side directory chosen for library import and return
    ``(full_dir, import_dir_desc)``.

    Admins import relative to ``library_import_dir``; other users import
    relative to ``user_library_import_dir``, scoped to a per-user
    subdirectory keyed by their email address.

    :raises: RequestParameterInvalidException, ConfigDoesNotAllowException
    """
    if server_dir in [None, 'None', '']:
        raise RequestParameterInvalidException("Invalid or unspecified server_dir parameter")
    if trans.user_is_admin:
        import_dir = trans.app.config.library_import_dir
        import_dir_desc = 'library_import_dir'
        if not import_dir:
            raise ConfigDoesNotAllowException('"library_import_dir" is not set in the Galaxy configuration')
    else:
        import_dir = trans.app.config.user_library_import_dir
        if not import_dir:
            raise ConfigDoesNotAllowException('"user_library_import_dir" is not set in the Galaxy configuration')
        # Non-admin users are confined to their own email-named subdirectory.
        if server_dir != trans.user.email:
            import_dir = os.path.join(import_dir, trans.user.email)
        import_dir_desc = 'user_library_import_dir'
    full_dir = os.path.join(import_dir, server_dir)
    unsafe = None
    if safe_relpath(server_dir):
        username = trans.user.username if trans.app.config.user_library_import_check_permissions else None
        if import_dir_desc == 'user_library_import_dir' and safe_contains(import_dir, full_dir, allowlist=trans.app.config.user_library_import_symlink_allowlist):
            # Walk the tree, flagging any symlink that resolves outside the
            # import dir and the configured allowlist.
            for unsafe in unsafe_walk(full_dir, allowlist=[import_dir] + trans.app.config.user_library_import_symlink_allowlist, username=username):
                log.error('User attempted to import a path that resolves to a path outside of their import dir: %s -> %s', unsafe, os.path.realpath(unsafe))
    else:
        # server_dir itself tries to escape the import dir (e.g. via '..').
        log.error('User attempted to import a directory path that resolves to a path outside of their import dir: %s -> %s', server_dir, os.path.realpath(full_dir))
        unsafe = True
    if unsafe:
        raise RequestParameterInvalidException("Invalid server_dir specified")
    return full_dir, import_dir_desc
def create_dataset_collection(self, trans, collection_type, element_identifiers=None, elements=None, hide_source_items=None):
    """Create a DatasetCollection of the given type from either element
    identifiers (external request) or pre-resolved elements (internal request).

    :param hide_source_items: when truthy, mark source dataset instances invisible
    :raises: RequestParameterInvalidException
    """
    if element_identifiers is None and elements is None:
        raise RequestParameterInvalidException(ERROR_INVALID_ELEMENTS_SPECIFICATION)
    if not collection_type:
        raise RequestParameterInvalidException(ERROR_NO_COLLECTION_TYPE)
    collection_type_description = self.collection_type_descriptions.for_collection_type(collection_type)
    # If we have elements, this is an internal request, don't need to load
    # objects from identifiers.
    if elements is None:
        if collection_type_description.has_subcollections():
            # Nested collection - recursively create collections and update identifiers.
            self.__recursively_create_collections(trans, element_identifiers)
        new_collection = False
        for element_identifier in element_identifiers:
            if element_identifier.get("src") == "new_collection" and element_identifier.get('collection_type') == '':
                new_collection = True
                elements = self.__load_elements(trans, element_identifier['element_identifiers'])
        if not new_collection:
            elements = self.__load_elements(trans, element_identifiers)
    # else if elements is set, it better be an ordered dict!

    if elements is not self.ELEMENTS_UNINITIALIZED:
        type_plugin = collection_type_description.rank_type_plugin()
        dataset_collection = builder.build_collection(type_plugin, elements)
        if hide_source_items:
            log.debug("Hiding source items during dataset collection creation")
            for dataset in dataset_collection.dataset_instances:
                dataset.visible = False
    else:
        # Deferred population - elements will be attached later.
        dataset_collection = model.DatasetCollection(populated=False)
    dataset_collection.collection_type = collection_type
    return dataset_collection
def _apply_extract_input(self, action: ExtractInputAction, execution: RefactorActionExecution):
    """Extract a step's input into a new workflow input step and connect it.

    Locates the named input on the referenced step, adds a matching workflow
    input (data, data_collection, or parameter), then wires it to the input.

    :raises: RequestParameterInvalidException when the input cannot be found
        or is a multi-input ("multiple") input
    """
    input_step_dict, input_name = self._input_from_action(action)
    step = self._step_with_module(input_step_dict["id"], execution)
    module = step.module
    inputs = module.get_all_inputs()
    input_def = None
    # Track every name seen so the error message can list the candidates.
    found_input_names = []
    for input in inputs:
        found_input_name = input["name"]
        found_input_names.append(found_input_name)
        if found_input_name == input_name:
            input_def = input
            break
    if input_def is None:
        raise RequestParameterInvalidException(
            f"Failed to find input with name {input_name} on step {input_step_dict['id']} - input names found {found_input_names}"
        )
    if input_def.get("multiple", False):
        raise RequestParameterInvalidException("Cannot extract input for multi-input inputs")
    module_input_type = input_def.get("input_type")
    # convert dataset, dataset_collection => data, data_collection for refactor API
    input_type = {
        "dataset": "data",
        "dataset_collection": "data_collection",
    }.get(module_input_type, module_input_type)
    input_action = AddInputAction(
        action_type="add_input",
        optional=input_def.get("optional"),
        type=input_type,
        label=action.label,
        position=action.position,
    )
    new_input_order_index = self._add_input_get_order_index(input_action, execution)
    connect_action = ConnectAction(
        action_type="connect",
        input=action.input,
        output=OutputReferenceByOrderIndex(order_index=new_input_order_index),
    )
    self._apply_connect(connect_action, execution)
def create_dataset_collection(self, trans, collection_type, element_identifiers=None, elements=None, hide_source_items=None, copy_elements=False):
    """Create a DatasetCollection of the given type from either element
    identifiers (external request) or pre-resolved elements (internal request).

    :param hide_source_items: forwarded to element resolution
    :param copy_elements: forwarded to element/subcollection creation
    :raises: RequestParameterInvalidException
    """
    # Make sure at least one of these is None.
    assert element_identifiers is None or elements is None
    if element_identifiers is None and elements is None:
        raise RequestParameterInvalidException(ERROR_INVALID_ELEMENTS_SPECIFICATION)
    if not collection_type:
        raise RequestParameterInvalidException(ERROR_NO_COLLECTION_TYPE)
    collection_type_description = self.collection_type_descriptions.for_collection_type(collection_type)
    has_subcollections = collection_type_description.has_subcollections()
    # If we have elements, this is an internal request, don't need to load
    # objects from identifiers.
    if elements is None:
        elements = self._element_identifiers_to_elements(
            trans,
            collection_type_description=collection_type_description,
            element_identifiers=element_identifiers,
            hide_source_items=hide_source_items,
            copy_elements=copy_elements)
    else:
        if has_subcollections:
            # Nested collection - recursively create collections as needed.
            self.__recursively_create_collections_for_elements(trans, elements, hide_source_items, copy_elements=copy_elements)
    # else if elements is set, it better be an ordered dict!

    if elements is not self.ELEMENTS_UNINITIALIZED:
        type_plugin = collection_type_description.rank_type_plugin()
        dataset_collection = builder.build_collection(type_plugin, elements)
    else:
        # Deferred population - elements will be attached later.
        dataset_collection = model.DatasetCollection(populated=False)
    dataset_collection.collection_type = collection_type
    return dataset_collection
def _find_step(self, step_reference: step_reference_union):
    """Resolve a step reference (by label or by order index) to the
    corresponding step dictionary of the workflow representation.

    :raises: RequestParameterInvalidException on an empty label or an
        unresolvable/out-of-range reference
    """
    order_index = None
    if isinstance(step_reference, StepReferenceByLabel):
        label = step_reference.label
        if not label:
            raise RequestParameterInvalidException("Empty label provided.")
        for step_order_index, step in self._as_dict["steps"].items():
            if step["label"] == label:
                order_index = step_order_index
                break
    else:
        order_index = step_reference.order_index
    if order_index is None:
        raise RequestParameterInvalidException(f"Failed to resolve step_reference {step_reference}")
    # Guard against an order index beyond the number of steps.
    if len(self._as_dict["steps"]) <= order_index:
        raise RequestParameterInvalidException(f"Failed to resolve step_reference {step_reference}")
    return self._as_dict["steps"][order_index]
def get_permissions(
    self,
    trans,
    encoded_folder_id: EncodedDatabaseIdField,
    scope: Optional[LibraryPermissionScope] = LibraryPermissionScope.current,
    page: Optional[int] = 1,
    page_limit: Optional[int] = 10,
    query: Optional[str] = None,
) -> Union[LibraryFolderCurrentPermissions, LibraryAvailablePermissions]:
    """
    Load all permissions for the given folder id and return it.

    :param  encoded_folder_id:      the encoded id of the folder
    :type   encoded_folder_id:      an encoded id string
    :param  scope:                  either 'current' or 'available'
    :type   scope:                  string
    :param  page:                   1-based page of available roles
    :param  page_limit:             number of roles per page
    :param  query:                  optional filter passed to the role lookup

    :returns:   dictionary with all applicable permissions' values
    :rtype:     dictionary

    :raises: InsufficientPermissionsException, RequestParameterInvalidException
    """
    current_user_roles = trans.get_current_user_roles()
    is_admin = trans.user_is_admin
    decoded_folder_id = self.folder_manager.cut_and_decode(trans, encoded_folder_id)
    folder = self.folder_manager.get(trans, decoded_folder_id)
    # Only admins or users who can manage this library item may inspect its permissions.
    if not (is_admin or trans.app.security_agent.can_manage_library_item(current_user_roles, folder)):
        raise InsufficientPermissionsException('You do not have proper permission to access permissions of this folder.')
    if scope is None or scope == LibraryPermissionScope.current:
        current_permissions = self.folder_manager.get_current_roles(trans, folder)
        return LibraryFolderCurrentPermissions.parse_obj(current_permissions)
    # Return roles that are available to select.
    elif scope == LibraryPermissionScope.available:
        roles, total_roles = trans.app.security_agent.get_valid_roles(trans, folder, query, page, page_limit)
        return_roles = []
        for role in roles:
            role_id = trans.security.encode_id(role.id)
            return_roles.append(dict(id=role_id, name=role.name, type=role.type))
        return LibraryAvailablePermissions(roles=return_roles, page=page, page_limit=page_limit, total=total_roles)
    else:
        raise RequestParameterInvalidException("The value of 'scope' parameter is invalid. Allowed values: current, available")
def __load_element(self, trans, element_identifier):
    """Resolve a single element identifier dict to a model object
    (an accessible HDA, an LDDA, or the collection of an HDCA).

    :raises: MessageException, RequestParameterInvalidException
    """
    # if not isinstance( element_identifier, dict ):
    #     # Is allowing this to just be the id of an hda too clever? Somewhat
    #     # consistent with other API methods though.
    #     element_identifier = dict( src='hda', id=str( element_identifier ) )

    # Previously created collection already found in request, just pass
    # through as is.
    if "__object__" in element_identifier:
        the_object = element_identifier["__object__"]
        if the_object is not None and the_object.id:
            context = self.model.context
            if the_object not in context:
                # Re-attach a detached object to the current session by id.
                the_object = context.query(type(the_object)).get(the_object.id)
        return the_object

    # dateset_identifier is dict {src=hda|ldda|hdca|new_collection, id=<encoded_id>}
    try:
        src_type = element_identifier.get('src', 'hda')
    except AttributeError:
        raise MessageException("Dataset collection element definition (%s) not dictionary-like." % element_identifier)
    encoded_id = element_identifier.get('id', None)
    if not src_type or not encoded_id:
        message_template = "Problem decoding element identifier %s - must contain a 'src' and a 'id'."
        message = message_template % element_identifier
        raise RequestParameterInvalidException(message)

    if src_type == 'hda':
        decoded_id = int(trans.app.security.decode_id(encoded_id))
        # Accessibility (not ownership) is enforced for HDAs.
        element = self.hda_manager.get_accessible(decoded_id, trans.user)
    elif src_type == 'ldda':
        element = self.ldda_manager.get(trans, encoded_id)
    elif src_type == 'hdca':
        # TODO: Option to copy? Force copy? Copy or allow if not owned?
        element = self.__get_history_collection_instance(trans, encoded_id).collection
    # TODO: ldca.
    else:
        raise RequestParameterInvalidException("Unknown src_type parameter supplied '%s'." % src_type)
    return element
def fetch_job_states(sa_session, job_source_ids, job_source_types):
    """Return a list of job-state summaries, one per (job_source_id,
    job_source_type) pair, preserving input order.

    Supports "Job", "ImplicitCollectionJobs", and "WorkflowInvocation"
    source types; invocations are expanded into their step job sources.

    :raises: RequestParameterInvalidException for any other source type
    """
    assert len(job_source_ids) == len(job_source_types)
    job_ids = set()
    implicit_collection_job_ids = set()
    workflow_invocations_job_sources = {}
    workflow_invocation_states = {}  # should be set before we walk step states to be conservative on whether things are done expanding yet

    # First pass: collect the unique ids to summarize, expanding workflow
    # invocations into their per-step job sources.
    for job_source_id, job_source_type in zip(job_source_ids, job_source_types):
        if job_source_type == "Job":
            job_ids.add(job_source_id)
        elif job_source_type == "ImplicitCollectionJobs":
            implicit_collection_job_ids.add(job_source_id)
        elif job_source_type == "WorkflowInvocation":
            invocation_state = sa_session.query(model.WorkflowInvocation).get(job_source_id).state
            workflow_invocation_states[job_source_id] = invocation_state
            workflow_invocation_job_sources = []
            for (invocation_step_source_type, invocation_step_source_id, invocation_step_state) in invocation_job_source_iter(sa_session, job_source_id):
                workflow_invocation_job_sources.append((invocation_step_source_type, invocation_step_source_id, invocation_step_state))
                if invocation_step_source_type == "Job":
                    job_ids.add(invocation_step_source_id)
                elif invocation_step_source_type == "ImplicitCollectionJobs":
                    implicit_collection_job_ids.add(invocation_step_source_id)
            workflow_invocations_job_sources[job_source_id] = workflow_invocation_job_sources
        else:
            raise RequestParameterInvalidException("Invalid job source type %s found."
                                                   % job_source_type)

    # Summarize each unique id exactly once.
    job_summaries = {}
    implicit_collection_jobs_summaries = {}
    for job_id in job_ids:
        job_summaries[job_id] = summarize_jobs_to_dict(sa_session, sa_session.query(model.Job).get(job_id))
    for implicit_collection_jobs_id in implicit_collection_job_ids:
        implicit_collection_jobs_summaries[implicit_collection_jobs_id] = summarize_jobs_to_dict(sa_session, sa_session.query(model.ImplicitCollectionJobs).get(implicit_collection_jobs_id))

    # Second pass: assemble the results in the order they were requested.
    rval = []
    for job_source_id, job_source_type in zip(job_source_ids, job_source_types):
        if job_source_type == "Job":
            rval.append(job_summaries[job_source_id])
        elif job_source_type == "ImplicitCollectionJobs":
            rval.append(implicit_collection_jobs_summaries[job_source_id])
        else:
            invocation_state = workflow_invocation_states[job_source_id]
            invocation_job_summaries = []
            invocation_implicit_collection_job_summaries = []
            invocation_step_states = []
            for (invocation_step_source_type, invocation_step_source_id, invocation_step_state) in workflow_invocations_job_sources[job_source_id]:
                invocation_step_states.append(invocation_step_state)
                if invocation_step_source_type == "Job":
                    invocation_job_summaries.append(job_summaries[invocation_step_source_id])
                else:
                    invocation_implicit_collection_job_summaries.append(implicit_collection_jobs_summaries[invocation_step_source_id])
            rval.append(summarize_invocation_jobs(job_source_id, invocation_job_summaries, invocation_implicit_collection_job_summaries, invocation_state, invocation_step_states))

    return rval
def __get_library_collection_instance(self, trans, id, check_ownership=False, check_accessible=True):
    """Load a LibraryDatasetCollectionAssociation by encoded id, optionally
    verifying that the current user can access it.

    :raises: NotImplementedError (ownership check unsupported),
        RequestParameterInvalidException, ItemAccessibilityException
    """
    if check_ownership:
        raise NotImplementedError("Functionality (getting library dataset collection with ownership check) unimplemented.")
    instance_id = int(trans.security.decode_id(id))
    collection_instance = trans.sa_session.query(trans.app.model.LibraryDatasetCollectionAssociation).get(instance_id)
    if not collection_instance:
        raise RequestParameterInvalidException(f"Library dataset collection association {id} not found")
    if check_accessible:
        if not trans.app.security_agent.can_access_library_item(trans.get_current_user_roles(), collection_instance, trans.user):
            raise ItemAccessibilityException("LibraryDatasetCollectionAssociation is not accessible to the current user", type='error')
    return collection_instance
def __get_history_collection_instance(self, trans, id, check_ownership=False, check_accessible=True):
    """Load a HistoryDatasetCollectionAssociation by encoded id, optionally
    verifying ownership and/or accessibility of its history.

    :raises: RequestParameterInvalidException, plus whatever the history
        manager raises from its ownership/accessibility checks
    """
    instance_id = int(trans.app.security.decode_id(id))
    collection_instance = trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(instance_id)
    if not collection_instance:
        raise RequestParameterInvalidException(f"History dataset collection association {id} not found")
    # Fall back to the collection's own history when trans carries no history.
    history = getattr(trans, 'history', collection_instance.history)
    if check_ownership:
        self.history_manager.error_unless_owner(collection_instance.history, trans.user, current_history=history)
    if check_accessible:
        self.history_manager.error_unless_accessible(collection_instance.history, trans.user, current_history=history)
    return collection_instance
def __load_element(self, trans, element_identifier):
    """Resolve a single element identifier dict to a model object
    (an HDA, an LDDA, or the collection of an HDCA).

    :raises: MessageException, RequestParameterInvalidException
    """
    #if not isinstance( element_identifier, dict ):
    #    # Is allowing this to just be the id of an hda too clever? Somewhat
    #    # consistent with other API methods though.
    #    element_identifier = dict( src='hda', id=str( element_identifier ) )

    # Previously created collection already found in request, just pass
    # through as is.
    if "__object__" in element_identifier:
        return element_identifier["__object__"]

    # dateset_identifier is dict {src=hda|ldda|hdca|new_collection, id=<encoded_id>}
    try:
        src_type = element_identifier.get('src', 'hda')
    except AttributeError:
        raise MessageException("Dataset collection element definition (%s) not dictionary-like." % element_identifier)
    encoded_id = element_identifier.get('id', None)
    if not src_type or not encoded_id:
        raise RequestParameterInvalidException("Problem decoding element identifier %s" % element_identifier)

    if src_type == 'hda':
        decoded_id = int(trans.app.security.decode_id(encoded_id))
        # Ownership is intentionally not enforced here.
        element = self.hda_manager.get(trans, decoded_id, check_ownership=False)
    elif src_type == 'ldda':
        element = self.get_library_dataset_dataset_association(trans, encoded_id)
    elif src_type == 'hdca':
        # TODO: Option to copy? Force copy? Copy or allow if not owned?
        element = self.__get_history_collection_instance(trans, encoded_id).collection
    # TODO: ldca.
    else:
        raise RequestParameterInvalidException("Unknown src_type parameter supplied '%s'." % src_type)
    return element
def get_indexes(self, id: str, index_type: str) -> Any:
    """Return the raw text contents of the index file for the given
    reference ``id`` and ``index_type``.

    :raises: RequestParameterInvalidException for unsupported index types,
        ReferenceDataError when the index file cannot be read
    """
    # Map of supported index types to their file extensions.
    extension_by_type = {'fasta_indexes': '.fai'}
    try:
        extension = extension_by_type[index_type]
    except KeyError:
        raise RequestParameterInvalidException(f'Invalid index type: {index_type}')
    table_entries = self._app.tool_data_tables.data_tables[index_type].data
    index_path = self._get_index_filename(id, table_entries, extension, index_type)
    try:
        with open(index_path) as index_file:
            return index_file.read()
    except OSError:
        raise ReferenceDataError(f'Failed to load index file for {id}')
def get_shed_tool_conf_dict(self, shed_tool_conf):
    """
    Return the in-memory version of the shed_tool_conf file, which is stored in the config_elems entry
    in the shed_tool_conf_dict associated with the file.
    """
    # Match either the full config path or just its file name.
    for conf_dict in self.app.toolbox.dynamic_confs(include_migrated_tool_conf=True):
        config_filename = conf_dict['config_filename']
        if shed_tool_conf in (config_filename, strip_path(config_filename)):
            return conf_dict
    raise RequestParameterInvalidException("Requested shed_tool_conf '%s' is not an active shed_tool_config_file" % shed_tool_conf)
def create_dataset_collection(self, trans, collection_type, element_identifiers=None, elements=None, hide_source_items=None):
    """Create a DatasetCollection of the given type from either element
    identifiers (external request) or pre-resolved elements (internal request).

    :param hide_source_items: when truthy, mark source dataset instances invisible
    :raises: RequestParameterInvalidException
    """
    # Make sure at least one of these is None.
    assert element_identifiers is None or elements is None
    if element_identifiers is None and elements is None:
        raise RequestParameterInvalidException(ERROR_INVALID_ELEMENTS_SPECIFICATION)
    if not collection_type:
        raise RequestParameterInvalidException(ERROR_NO_COLLECTION_TYPE)
    collection_type_description = self.collection_type_descriptions.for_collection_type(collection_type)
    # If we have elements, this is an internal request, don't need to load
    # objects from identifiers.
    if elements is None:
        elements = self._element_identifiers_to_elements(trans, collection_type_description, element_identifiers)
    # else if elements is set, it better be an ordered dict!

    if elements is not self.ELEMENTS_UNINITIALIZED:
        type_plugin = collection_type_description.rank_type_plugin()
        dataset_collection = builder.build_collection(type_plugin, elements)
        if hide_source_items:
            log.debug("Hiding source items during dataset collection creation")
            for dataset in dataset_collection.dataset_instances:
                dataset.visible = False
    else:
        # Deferred population - elements will be attached later.
        dataset_collection = model.DatasetCollection(populated=False)
    dataset_collection.collection_type = collection_type
    return dataset_collection
def index(self, trans, **kwd):
    """
    GET /api/tools
    Displays a collection of tools with optional criteria.

    :param q:        (optional)if present search on the given query will be performed
    :type  q:        str
    :param page:     (optional)requested page of the search
    :type  page:     int
    :param page_size:    (optional)requested page_size of the search
    :type  page_size:    int
    :param jsonp:    (optional)flag whether to use jsonp format response, defaults to False
    :type  jsonp:    bool
    :param callback: (optional)name of the function to wrap callback in
                     used only when jsonp is true, defaults to 'callback'
    :type  callback: str

    :returns dict:   object containing list of results and metadata

    Examples:
        GET http://localhost:9009/api/tools
        GET http://localhost:9009/api/tools?q=fastq
    """
    q = kwd.get('q', '')
    if not q:
        # BUGFIX: previously raised the NotImplemented singleton (not an
        # exception class), which itself fails with a TypeError. The proper
        # exception class is NotImplementedError.
        raise NotImplementedError('Listing of all the tools is not implemented. Provide parameter "q" to search instead.')
    page = kwd.get('page', 1)
    page_size = kwd.get('page_size', 10)
    try:
        page = int(page)
        page_size = int(page_size)
    except ValueError:
        raise RequestParameterInvalidException('The "page" and "page_size" have to be integers.')
    return_jsonp = util.asbool(kwd.get('jsonp', False))
    callback = kwd.get('callback', 'callback')
    search_results = self._search(trans, q, page, page_size)
    if return_jsonp:
        # Wrap the JSON payload in the requested callback for JSONP clients.
        response = '%s(%s);' % (callback, json.dumps(search_results))
    else:
        response = json.dumps(search_results)
    return response
def _validate_and_parse_update_payload(self, payload):
    """
    Validate and sanitize the recognized fields of an update payload.

    String fields are sanitized and length-checked, ``file_ext`` is
    validated against the datatypes registry, and ``tags`` is normalized
    to a sanitized list. Unrecognized keys and None values are dropped.

    :param payload: mapping of field name -> new value
    :type  payload: dict

    :returns: dictionary of the validated and sanitized fields
    :rtype:   dict

    :raises: RequestParameterInvalidException
    """
    MINIMUM_STRING_LENGTH = 1
    validated_payload = {}
    for key, val in payload.items():
        if val is None:
            continue
        # BUGFIX: the previous code used ``key in ('name')`` (and similar),
        # which is a substring test against the string 'name' because the
        # parentheses without a comma do not create a tuple - e.g. key 'am'
        # would have matched. Exact comparisons restore the intended checks.
        if key == 'name':
            if len(val) < MINIMUM_STRING_LENGTH:
                raise RequestParameterInvalidException(
                    f'{key} must have at least length of {MINIMUM_STRING_LENGTH}')
            val = validation.validate_and_sanitize_basestring(key, val)
            validated_payload[key] = val
        if key in ('misc_info', 'message'):
            val = validation.validate_and_sanitize_basestring(key, val)
            validated_payload[key] = val
        if key == 'file_ext':
            datatype = self.app.datatypes_registry.get_datatype_by_extension(val)
            # "auto" is accepted as a sentinel for automatic detection.
            if datatype is None and val not in ("auto", ):
                raise RequestParameterInvalidException(
                    f'This Galaxy does not recognize the datatype of: {val}')
            validated_payload[key] = val
        if key == 'genome_build':
            if len(val) < MINIMUM_STRING_LENGTH:
                raise RequestParameterInvalidException(
                    f'{key} must have at least length of {MINIMUM_STRING_LENGTH}')
            val = validation.validate_and_sanitize_basestring(key, val)
            validated_payload[key] = val
        if key == 'tags':
            val = validation.validate_and_sanitize_basestring_list(key, util.listify(val))
            validated_payload[key] = val
    return validated_payload
def validation_error_to_message_exception(e):
    """Map a pydantic ValidationError to the appropriate API exception:
    missing-parameter when every error is a missing/None field, otherwise
    invalid-parameter."""
    has_missing = False
    has_invalid = False
    for error in e.errors():
        error_type = error["type"]
        if error_type in ("value_error.missing", "type_error.none.not_allowed"):
            has_missing = True
        elif error_type.startswith("type_error"):
            has_invalid = True
    validation_errors = loads(e.json())
    if has_missing and not has_invalid:
        return RequestParameterMissingException(str(e), validation_errors=validation_errors)
    return RequestParameterInvalidException(str(e), validation_errors=validation_errors)
def get(self, trans, decoded_repo_id):
    """
    Get the repo from the DB.

    :param  decoded_repo_id:       decoded repo id
    :type   decoded_repo_id:       int

    :returns:   the requested repo
    :rtype:     Repository

    :raises: InconsistentDatabase, RequestParameterInvalidException, InternalServerError
    """
    repo_query = trans.sa_session.query(trans.app.model.Repository).filter(
        trans.app.model.Repository.table.c.id == decoded_repo_id)
    try:
        return repo_query.one()
    except MultipleResultsFound:
        # id should be unique, so duplicates indicate a corrupted database.
        raise InconsistentDatabase('Multiple repositories found with the same id.')
    except NoResultFound:
        raise RequestParameterInvalidException('No repository found with the id provided.')
    except Exception:
        raise InternalServerError('Error loading from the database.')
def update(self, trans, group, name=None, description=None):
    """
    Update the given group.

    :param group:        group to update
    :param name:         new name for the group; left unchanged when None
    :param description:  new description for the group; left unchanged when None

    :returns: the (possibly updated) group
    :raises: ItemAccessibilityException, RequestParameterInvalidException
    """
    changed = False
    # BUGFIX: ``user_is_admin`` is a property (it is accessed without calling
    # elsewhere in this codebase); invoking it as a method would raise a
    # TypeError instead of performing the admin check.
    if not trans.user_is_admin:
        raise ItemAccessibilityException('Only administrators can update groups.')
    if group.deleted:
        raise RequestParameterInvalidException('You cannot modify a deleted group. Undelete it first.')
    if name is not None:
        group.name = name
        changed = True
    if description is not None:
        group.description = description
        changed = True
    if changed:
        # Persist only when a field actually changed.
        trans.sa_session.add(group)
        trans.sa_session.flush()
    return group
def _search(self, trans, q, page=1, page_size=10):
    """
    Perform the search over TS repositories.
    Note that search works over the Whoosh index which you have
    to pre-create with scripts/tool_shed/build_ts_whoosh_index.sh manually.
    Also TS config option toolshed_search_on has to be True and
    whoosh_index_dir has to be specified.

    :param q:         search term (minimum 3 characters after stripping)
    :param page:      1-based page of results to return
    :param page_size: number of results per page

    :returns: dict of search results, including the TS hostname
    :raises: ConfigDoesNotAllowException, RequestParameterInvalidException
    """
    conf = self.app.config
    if not conf.toolshed_search_on:
        raise ConfigDoesNotAllowException('Searching the TS through the API is turned off for this instance.')
    if not conf.whoosh_index_dir:
        raise ConfigDoesNotAllowException('There is no directory for the search index specified. Please contact the administrator.')
    search_term = q.strip()
    if len(search_term) < 3:
        raise RequestParameterInvalidException('The search term has to be at least 3 characters long.')

    repo_search = RepoSearch()

    # Per-field scoring boosts, read from config with defaults.
    Boosts = namedtuple('Boosts', [
        'repo_name_boost',
        'repo_description_boost',
        'repo_long_description_boost',
        'repo_homepage_url_boost',
        'repo_remote_repository_url_boost',
        'repo_owner_username_boost'])
    boosts = Boosts(
        float(conf.get('repo_name_boost', 0.9)),
        float(conf.get('repo_description_boost', 0.6)),
        float(conf.get('repo_long_description_boost', 0.5)),
        float(conf.get('repo_homepage_url_boost', 0.3)),
        float(conf.get('repo_remote_repository_url_boost', 0.2)),
        float(conf.get('repo_owner_username_boost', 0.3)))

    results = repo_search.search(trans, search_term, page, page_size, boosts)
    results['hostname'] = web.url_for('/', qualified=True)
    return results
def refactor(self, refactor_request: RefactorActions):
    """Apply each requested refactor action in order and return the list of
    RefactorActionExecution records describing what was done.

    :raises: RequestParameterInvalidException for unknown action types
    """
    executions = []
    for action in refactor_request.actions:
        # TODO: we need to regenerate a detached workflow from as_dict after
        # after each iteration here. Otherwise one set of changes might render
        # the workflow state out of sync. It is fine if you're just executing one
        # action at a time or just performing actions that use raw_workflow_description.
        action_type = action.action_type
        # Dispatch dynamically to the matching _apply_<action_type> handler.
        handler = getattr(self, f"_apply_{action_type}", None)
        if handler is None:
            raise RequestParameterInvalidException(
                f"Unknown workflow editing action encountered [{action_type}]")
        execution = RefactorActionExecution(action=action.dict(), messages=[])
        handler(action, execution)
        executions.append(execution)
    return executions
def _apply_add_input(self, action: AddInputAction, execution: RefactorActionExecution):
    """Add a workflow input step (data, collection, or parameter input)
    described by the action, delegating step creation to ``_apply_add_step``.

    :raises: RequestParameterInvalidException for unknown parameter types
    """
    input_type = action.type
    module_type = None
    tool_state: Dict[str, Any] = {}
    # Both "data"/"dataset" spellings are accepted for convenience.
    if input_type in ["data", "dataset"]:
        module_type = "data_input"
    elif input_type in ["data_collection", "dataset_collection"]:
        module_type = "data_collection_input"
        tool_state["collection_type"] = action.collection_type
    else:
        if input_type not in InputParameterModule.POSSIBLE_PARAMETER_TYPES:
            raise RequestParameterInvalidException(f"Invalid input type {input_type} encountered")
        module_type = "parameter_input"
        tool_state["parameter_type"] = input_type
    # Copy optional parameter-style settings into the tool state when present.
    for action_key in ["restrictions", "suggestions", "optional", "default"]:
        value = getattr(action, action_key, None)
        if value is not None:
            tool_state[action_key] = value
    if action.restrict_on_connections is not None:
        tool_state["restrictOnConnections"] = action.restrict_on_connections
    add_step_kwds = {}
    if action.label:
        add_step_kwds["label"] = action.label
    add_step_action = AddStepAction(
        action_type="add_step",
        type=module_type,
        tool_state=tool_state,
        position=action.position,
        **add_step_kwds)
    self._apply_add_step(add_step_action, execution)