def get_annotations_list(connection, fetch_images=False):
    """Return JSON for all tag annotations, grouping tags under their tagsets.

    Tags that belong to a tagset are reported inside that tagset's JSON
    entry; the remaining (orphan) tags are appended as standalone entries.
    When fetch_images is True, each standalone tag also carries the list of
    images it annotates.
    """
    switch_to_default_search_group(connection)
    # partition every TagAnnotation into tagsets vs plain tags
    tag_sets = []
    tags = []
    for t in connection.getObjects("TagAnnotation"):
        (tag_sets if _is_tagset(t) else tags).append(t)
    annotations = []
    for ts in tag_sets:
        tags_map = _get_tags_list(connection, ts, fetch_images,
                                  append_raw_object=True)
        for tmi in tags_map:
            tobj = tmi.pop('obj')
            # a tag reported inside a tagset must not be duplicated as a
            # standalone entry below; remove() replaces the original
            # pop(index()) double scan with a single equivalent lookup
            try:
                tags.remove(tobj)
            except ValueError:
                pass
        annotations.append(_tagset_to_json(ts, tags_map))
    for t in tags:
        imgs_list = _get_images_by_tag(t.getId(), connection) if fetch_images else []
        annotations.append(_tag_to_json(t, imgs_list))
    return annotations
def _get_images_by_tag(tag_id, connection):
    """Return the JSON representation of every image annotated with this tag."""
    switch_to_default_search_group(connection)
    return [
        _image_to_json(image, connection)
        for image in connection.getObjectsByAnnotations('Image', [tag_id])
    ]
def get_images_quick_list(connection, expand_imgs_series=False):
    """Build a lightweight listing of all images (plain OMERO plus MIRAX files)."""
    # NOTE(review): an identical function is defined again later in this file;
    # at import time the later definition shadows this one
    switch_to_default_search_group(connection)
    omero_imgs = connection.getObjects('Image')
    if not expand_imgs_series:
        omero_imgs = _reduce_images_series(omero_imgs)
    # right now we only need to separate OMERO imgs from "special" ones like MIRAX
    quick_list = [
        {'omero_id': i.getId(), 'name': i.getName(), 'img_type': 'OMERO_IMG'}
        for i in omero_imgs
    ]
    # get "special" images like MIRAX ones (actually, right now only the MIRAX ones...)
    mirax_files = connection.getObjects('OriginalFile',
                                        attributes={'mimetype': 'mirax/index'})
    quick_list.extend(
        {'omero_id': f.getId(), 'name': f.getName(), 'img_type': 'MIRAX'}
        for f in mirax_files
    )
    return quick_list
def get_dataset(connection, dataset_id, fetch_images=False, expand_img_series=False):
    """Fetch one dataset as JSON (optionally with its images); None if not found."""
    switch_to_default_search_group(connection)
    ds = connection.getObject('Dataset', dataset_id)
    if ds is None:
        return None
    # series reduction is applied unless the caller asked for expansion
    imgs = _get_images_for_dataset(ds, not expand_img_series) if fetch_images else []
    return _dataset_to_json(ds, connection=connection, image_objects=imgs)
def get_tag(connection, tag_id, fetch_images=False):
    """Fetch a plain tag as JSON; None when missing or when the id is a tagset."""
    switch_to_default_search_group(connection)
    tag = connection.getObject('TagAnnotation', tag_id)
    if tag is None or _is_tagset(tag):
        return None
    imgs = _get_images_by_tag(tag_id, connection) if fetch_images else []
    return _tag_to_json(tag, imgs)
def get_tagset(connection, tagset_id, fetch_tags=False, fetch_images=False):
    """Fetch a tagset as JSON; None when missing or when the id is a plain tag."""
    switch_to_default_search_group(connection)
    tagset = connection.getObject('TagAnnotation', tagset_id)
    if tagset is None or not _is_tagset(tagset):
        return None
    contained = _get_tags_list(connection, tagset, fetch_images) if fetch_tags else []
    return _tagset_to_json(tagset, contained)
def get_image(connection, image_id, fetch_rois=False):
    """Fetch one image as JSON, optionally with its ROIs; None if not found."""
    switch_to_default_search_group(connection)
    img = connection.getObject('Image', image_id)
    if img is None:
        return None
    rois = []
    if fetch_rois:
        found = connection.getRoiService().findByImage(int(image_id), None)
        rois = list(found.rois)
    return _image_to_json(img, connection, True, rois)
def get_projects(connection, fetch_datasets=False):
    """Return every project as JSON; datasets (without images) on request."""
    # NOTE(review): this function is defined twice in this file; the later,
    # identical definition shadows this one at import time
    switch_to_default_search_group(connection)
    result = []
    for project in connection.listProjects():
        children = list(project.listChildren()) if fetch_datasets else []
        result.append(
            _project_to_json(project, connection=connection,
                             datasets_map=((child, []) for child in children)))
    return result
def get_projects(connection, fetch_datasets=False):
    """Return every project as JSON; datasets (without images) on request."""
    switch_to_default_search_group(connection)

    def _children(proj):
        # datasets are only materialized when the caller asked for them
        return list(proj.listChildren()) if fetch_datasets else []

    return [
        _project_to_json(proj, connection=connection,
                         datasets_map=((d, []) for d in _children(proj)))
        for proj in connection.listProjects()
    ]
def get_project(connection, project_id, fetch_datasets=False, fetch_images=False,
                expand_img_series=False):
    """Fetch one project as JSON, optionally with datasets and their images."""
    switch_to_default_search_group(connection)
    project = connection.getObject('Project', project_id)
    if project is None:
        return None
    datasets_map = []
    if fetch_datasets:
        for ds in project.listChildren():
            imgs = (_get_images_for_dataset(ds, not expand_img_series)
                    if fetch_images else [])
            datasets_map.append((ds, imgs))
    return _project_to_json(project, datasets_map=datasets_map, connection=connection)
def find_annotations(search_pattern, connection, fetch_images=False):
    """Find tags/tagsets whose text or description contains the pattern.

    The match is case-insensitive (both sides lower-cased in HQL).  Tagsets
    are returned without their contained tags; plain tags optionally carry
    the images they annotate.
    """
    switch_to_default_search_group(connection)
    query_service = connection.getQueryService()
    params = omero.sys.ParametersI()
    # wrap the pattern for a LIKE "contains" match
    params.addString('search_pattern', '%%%s%%' % search_pattern)
    query = '''
        SELECT t FROM TagAnnotation t
        WHERE lower(t.description) LIKE lower(:search_pattern)
        OR lower(t.textValue) LIKE lower(:search_pattern)
        '''
    matches = []
    for raw in query_service.findAllByQuery(query, params):
        wrapped = TagAnnotationWrapper(connection, raw)
        if _is_tagset(wrapped):
            matches.append(_tagset_to_json(wrapped))
            continue
        images = _get_images_by_tag(wrapped.getId(), connection) if fetch_images else []
        matches.append(_tag_to_json(wrapped, images))
    return matches
def get_images_quick_list(connection, expand_imgs_series=False):
    """Return minimal descriptors for all images: plain OMERO plus MIRAX files."""
    switch_to_default_search_group(connection)

    def _entry(obj, img_type):
        # right now we only need to separate OMERO imgs from "special" ones like MIRAX
        return {'omero_id': obj.getId(), 'name': obj.getName(), 'img_type': img_type}

    # get OMERO images
    images = connection.getObjects('Image')
    if not expand_imgs_series:
        images = _reduce_images_series(images)
    result = [_entry(img, 'OMERO_IMG') for img in images]
    # get "special" images like MIRAX ones (actually, right now only the MIRAX ones...)
    mirax = connection.getObjects('OriginalFile',
                                  attributes={'mimetype': 'mirax/index'})
    result.extend(_entry(f, 'MIRAX') for f in mirax)
    return result
def get_original_files(connection, name, mimetype=None):
    """Look up OriginalFile objects by name, optionally narrowed by mimetype."""
    switch_to_default_search_group(connection)
    wanted = {'name': name}
    # truthy check (not `is not None`) mirrors the original: an empty
    # mimetype string is treated the same as no mimetype filter
    if mimetype:
        wanted['mimetype'] = mimetype
    return list(connection.getObjects('OriginalFile', attributes=wanted))