def upload_image_from_file(file, dataset_name, project_name='', attachments=(),
                           wait=-1):
    """Import an image file into OMERO and link it into a dataset.

    Parameters
    ----------
    file : str
        Path of the image file to import.
    dataset_name : str
        Name of the dataset to link the image into (created if missing).
    project_name : str
        Optional project to place the dataset under.
    attachments : iterable of str
        Paths of files to attach to the imported image as file annotations.
    wait : int
        Seconds to wait for the import to complete; -1 waits indefinitely.

    Returns
    -------
    int or None
        ID of the imported image, or None if the import produced no result.
    """
    attachments = list(attachments)
    dataset_id = get_or_create_dataset_id(dataset_name, project_name)
    with cli_login(*LOGIN_ARGS) as cli:
        conn = BlitzGateway(client_obj=cli._client)
        r = file_import(cli._client, file, wait)
        if not r:
            # import failed or produced no response - nothing to link
            return None
        # TODO - doing this as iterable fileset for single file is weird
        p = r.pixels[0]
        image_id = p.image.id.val
        logger.debug('Imported Image ID: %d' % image_id)
        link = omero.model.DatasetImageLinkI()
        link.parent = omero.model.DatasetI(dataset_id, False)
        link.child = omero.model.ImageI(image_id, False)
        conn.getUpdateService().saveArray([link], conn.SERVICE_OPTS)
        if attachments:
            # have to have loadedness -> True to link an annotation
            image = conn.getObject("Image", image_id)
            for attachment in attachments:
                # TODO - add guess_mimetype / namespace here
                upload_file_annotation(conn, image, attachment,
                                       namespace='pyme.localizations')
        return image_id
def list_children(name, ignore):
    """Return plate identifiers for a named idr0072 sub-screen.

    The screen is looked up by the full name
    ``idr0072-schormann-subcellref/<name>``; the ``ignore`` parameter is
    accepted but unused.
    """
    screen_name = "idr0072-schormann-subcellref/" + name
    with cli_login() as cli:
        conn = BlitzGateway(client_obj=cli.get_client())
        screen = conn.getObject('Screen', attributes={'name': screen_name})
        return [f"Plate:{child.id}" for child in screen.listChildren()]
def localization_files_from_image_url(image_url, out_dir):
    """
    Download localization-file attachments (.hdf/.h5r) of an OMERO image.

    Parameters
    ----------
    image_url : str
        url from OMERO web client, gotten typically by clicking the link
        symbol with an image selected, i.e. `Link to this image`.
    out_dir : str
        path / name of tempfile.TemporaryDirectory

    Returns
    -------
    localization_files : list
        paths to localization files saved to disk
    """
    from urllib.parse import urlparse, parse_qs

    url = urlparse(image_url)
    # for `Link to this image` the image ID is the tail of the `show` query
    image_id = int(parse_qs(url.query)['show'][0].split('-')[-1])
    localization_files = []
    with cli_login(*LOGIN_ARGS) as cli:
        conn = BlitzGateway(client_obj=cli._client)
        # Would be nice to specify pyme.localizations namespace, but probably
        # reliable to check file extension since one can manually attach
        # localizations with no namespace specified
        localization_links = list(
            conn.getAnnotationLinks('Image', parent_ids=[image_id]))
        raw_file_store = conn.createRawFileStore()
        try:
            for link in localization_links:
                try:
                    # select for Blitz-wrapped omero types with getFile attr
                    og_file = link.getChild().getFile()._obj
                except AttributeError:
                    # not all BlitzWrapped omero types have getFile
                    continue
                filename = og_file.getName().getValue()
                if os.path.splitext(filename)[-1] not in ('.hdf', '.h5r'):
                    continue
                path = os.path.join(out_dir, filename)
                localization_files.append(path)
                raw_file_store.setFileId(og_file.id.val)
                with open(path, 'wb') as f:
                    f.write(raw_file_store.read(0, og_file.size.val))
        finally:
            # raw file stores are stateful server resources; always release
            raw_file_store.close()
    return localization_files
def connect_and_upload_file_annotation(image_id, file,
                                       mimetype='application/octet-stream',
                                       namespace='', description=None):
    """Open a CLI session and attach `file` to the given image.

    Thin wrapper around `upload_file_annotation` that handles the login
    and gateway construction.
    """
    with cli_login(*LOGIN_ARGS) as cli:
        gateway = BlitzGateway(client_obj=cli._client)
        upload_file_annotation(gateway, image_id, file, mimetype, namespace,
                               description)
def execOmeroCommand(args):
    """Run an OMERO CLI command as root in the pharmbio_read_annotate group."""
    logging.info("exec command:" + str(args))
    login_args = ("--server", "localhost",
                  "--port", "4064",
                  "--group", "pharmbio_read_annotate",
                  "--user", "root",
                  "--password", omero_rootpass)
    with cli_login(*login_args) as cli:
        logging.info("Before command")
        cli.invoke(args)
        logging.info("After command")
        cli.close()
def main(argv):
    """Copy ROIs between two OMERO servers.

    argv: [username2, password2, server2, source, target] where source and
    target are 'Image:ID' or 'Dataset:ID'. For datasets, images are matched
    between source and target by name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('username2', help='Target server Username')
    parser.add_argument('password2', help='Target server Password')
    parser.add_argument('server2', help='Target server')
    parser.add_argument('source',
                        help=('Copy ROIs FROM this: Image:ID or Dataset:ID'))
    parser.add_argument('target',
                        help=('Copy ROIs TO this: Image:ID or Dataset:ID'))
    args = parser.parse_args(argv)

    source = args.source
    target = args.target
    # Validate before opening any connection so bad input fails fast with
    # the friendly message instead of an IndexError/ValueError mid-run.
    if not (source.startswith('Image:') or source.startswith('Dataset:')):
        print("Source needs to be Image:ID or Dataset:ID")
        return
    source_id = int(source.split(":")[1])
    target_id = int(target.split(":")[1])

    with cli_login() as cli:
        conn = BlitzGateway(client_obj=cli._client)
        # group -1: query across all groups visible to the session
        conn.SERVICE_OPTS.setOmeroGroup(-1)
        conn2 = BlitzGateway(args.username2, args.password2, port=4064,
                             host=args.server2)
        conn2.connect()
        source_images = []
        target_image_ids = []
        if source.startswith('Image:'):
            source_images.append(conn.getObject('Image', source_id))
            target_image_ids.append(target_id)
        else:
            # Dataset:ID - pair source images with target images by name
            dataset = conn.getObject('Dataset', source_id)
            target_dataset = conn2.getObject('Dataset', target_id)
            ids_by_name = image_ids_by_name(target_dataset)
            for image in dataset.listChildren():
                if image.name in ids_by_name:
                    source_images.append(image)
                    target_image_ids.append(ids_by_name[image.name])
        print("Processing", source_images)
        print("...to target images:", target_image_ids)
        for image, to_target_id in zip(source_images, target_image_ids):
            process_image(conn, conn2, image, to_target_id)
        conn2.close()
def execOmeroCommand(args):
    """Run an OMERO CLI command as root and return its captured stdout.

    Replaces the Python-2-only ``cStringIO`` and the manual ``sys.stdout``
    swap (which was not restored if the command raised) with
    ``contextlib.redirect_stdout``, which always restores the stream.
    """
    from io import StringIO
    from contextlib import redirect_stdout

    logging.info("exec command:" + str(args))
    retval = ""
    buffer = StringIO()
    # Capture everything the CLI prints so it can be returned to the caller.
    with redirect_stdout(buffer):
        with cli_login("--server", "localhost",
                       "--port", "4064",
                       "--user", "root",
                       "--password", omero_rootpass) as cli:
            logging.info("Before command")
            cli.invoke(args)
            logging.info("After command")
            retval = buffer.getvalue()
            cli.close()
    logging.info("retval " + retval)
    return retval
def main(argv):
    """Write one CSV of created points per timelapse image found in OMERO."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help='Increase the command verbosity')
    parser.add_argument('--quiet', '-q', action='count', default=0,
                        help='Decrease the command verbosity')
    parser.parse_args(argv)

    df = pandas.read_csv(PROCESSED_FILE_PATH, delimiter='\t')
    with cli_login() as c:
        conn = BlitzGateway(client_obj=c.get_client())
        for image in get_timelapse_images(conn):
            points = create_points(conn, df, image)
            points.to_csv(f"{image.getId()}.csv", index=False)
def main(argv):
    """Import files/directories into OMERO, optionally linking to a dataset.

    --dataset: link imported images to this Dataset ID.
    --wait: seconds to wait per import (0 = return immediately,
            -1 = wait indefinitely, default).
    path: one or more files or directories to import.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset', type=int, help=(
            'Add imported files to this Dataset ID (not valid when wait=-1)'))
    parser.add_argument(
        '--wait', type=int, default=-1,
        help=('Wait for this number of seconds for each import to complete. '
              '0: return immediately, -1: wait indefinitely (default)'))
    parser.add_argument('path', nargs='+', help='Files or directories')
    args = parser.parse_args(argv)

    with cli_login() as cli:
        conn = BlitzGateway(client_obj=cli._client)

        if args.dataset and not conn.getObject('Dataset', args.dataset):
            print('Dataset id not found: %s' % args.dataset)
            sys.exit(1)

        for fs_path in args.path:
            print('Importing: %s' % fs_path)
            rsp = full_import(cli._client, fs_path, args.wait)
            if not rsp:
                continue
            links = []
            for p in rsp.pixels:
                print('Imported Image ID: %d' % p.image.id.val)
                if args.dataset:
                    link = omero.model.DatasetImageLinkI()
                    link.parent = omero.model.DatasetI(args.dataset, False)
                    link.child = omero.model.ImageI(p.image.id.val, False)
                    links.append(link)
            if links:
                # only hit the update service when there is something to save
                conn.getUpdateService().saveArray(links, conn.SERVICE_OPTS)
def main(argv):
    """Attach tables via `attach_tables`; -v/-q flags tune the log level."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help='Increase the command verbosity')
    parser.add_argument('--quiet', '-q', action='count', default=0,
                        help='Decrease the command verbosity')
    parser.add_argument('--dry-run', '-n', action='store_true',
                        help='Run command in dry-run mode')
    args = parser.parse_args(argv)

    # Each -v lowers the threshold by 10, each -q raises it by 10.
    level = logging.INFO + 10 * (args.quiet - args.verbose)
    logging.basicConfig(level=level)

    with cli_login() as c:
        conn = BlitzGateway(client_obj=c.get_client())
        attach_tables(conn)
def download_image(image_url, out_dir):
    """
    Export an OMERO image to `out_dir` as an OME-TIFF.

    Parameters
    ----------
    image_url : str
        url from OMERO web client, gotten typically by clicking the link
        symbol with an image selected, i.e. `Link to this image`.
    out_dir : str
        path / name of tempfile.TemporaryDirectory

    Returns
    -------
    path : str
        path to file saved to disk
    """
    from urllib.parse import urlparse, parse_qs

    url = urlparse(image_url)
    # for `Link to this image` the image ID is the tail of the `show` query
    image_id = int(parse_qs(url.query)['show'][0].split('-')[-1])
    with cli_login(*LOGIN_ARGS) as cli:
        conn = BlitzGateway(client_obj=cli._client)
        image = conn.getObject("Image", image_id)
        total_size, buff_generator = image.exportOmeTiff(BUFF_SIZE)
        path = os.path.join(out_dir,
                            os.path.splitext(image.getName())[0] + '.tif')
        # stream the export to disk in BUFF_SIZE chunks
        with open(path, 'wb') as f:
            for buff in buff_generator:
                f.write(buff)
    return path
def get_or_create_dataset_id(dataset_name, project_name=''):
    """Return the ID of a dataset by name, creating it if necessary.

    If `project_name` is given, the dataset is looked up (and, when newly
    created, linked) within that project; otherwise it is searched for among
    all datasets visible to the session.
    """
    with cli_login(*LOGIN_ARGS) as cli:
        conn = BlitzGateway(client_obj=cli._client)
        # handle linking with project if given
        if project_name != '':
            # check if the dataset already exists within the project
            project_id = get_or_create_project_id(conn, project_name)
            project = conn.getObject("Project", project_id)
            dataset_wrapper = project.findChildByName(dataset_name)
            if dataset_wrapper is not None:
                return dataset_wrapper.getId()
            # make a new dataset
            dataset_id = create_dataset(conn, dataset_name)
            # link it to the project
            link = omero.model.ProjectDatasetLinkI()
            link.parent = omero.model.ProjectI(project_id, False)
            link.child = omero.model.DatasetI(dataset_id, False)
            conn.getUpdateService().saveArray([link], conn.SERVICE_OPTS)
            return dataset_id
        # no project specified, check if the dataset already exists
        datasets = conn.getContainerService().loadContainerHierarchy(
            'Dataset', None, None)
        for d in datasets:
            if d.getName().getValue() == dataset_name:
                return d.getId().getValue()
        # otherwise create a new dataset
        return create_dataset(conn, dataset_name)
def download_image_data(image_ids_or_dataset_id, dataset=False,
                        download_original=False,
                        channel=None, z_stack=0, frame=0,
                        coord=(0, 0), width=0, height=0,
                        region_spec='rectangle',
                        skip_failed=False, download_tar=False,
                        omero_host='idr.openmicroscopy.org',
                        omero_secured=False, config_file=None):
    """Download image data (regions or original files) from an OMERO server.

    Parameters
    ----------
    image_ids_or_dataset_id : list
        Image IDs (optionally prefixed 'image-'), or a single-element list
        holding a dataset ID when ``dataset`` is True.
    dataset : bool
        Treat the first element of ``image_ids_or_dataset_id`` as a
        Dataset ID and download all of its images.
    download_original : bool
        Download the original imported file(s) via the OMERO CLI instead of
        extracting a region as TIFF.
    channel : str or None
        Channel name; None selects the first channel.
    z_stack, frame : int
        Plane and timepoint to extract (confined to valid bounds).
    coord, width, height, region_spec :
        Region selection; ``region_spec`` is 'rectangle' (coord = upper-left)
        or 'center' (coord = center), only used when not downloading
        originals.
    skip_failed : bool
        Warn and continue on per-image failures instead of raising.
    download_tar : bool
        Collect downloads into ``images.tar`` instead of the working dir.
    omero_host, omero_secured, config_file :
        Connection settings; credentials come from the JSON ``config_file``
        ('username'/'password' keys) or fall back to the built-in defaults.
    """
    if config_file is None:  # IDR connection: use default credentials
        omero_username = '******'
        omero_password = '******'
    else:  # other omero instance: read credentials from JSON config
        with open(config_file) as f:
            cfg = json.load(f)
        omero_username = cfg['username']
        omero_password = cfg['password']
        # empty credentials fall back to the defaults
        if omero_username == "" or omero_password == "":
            omero_username = '******'
            omero_password = '******'

    if not download_original and region_spec not in ['rectangle', 'center']:
        raise ValueError(
            'Got unknown value "{0}" as region_spec argument'
            .format(region_spec))
    # ExitStack closes the gateway/archive/tempdir in reverse order on exit
    with ExitStack() as exit_stack:
        conn = exit_stack.enter_context(
            BlitzGateway(
                omero_username, omero_password,
                host=omero_host, secure=omero_secured))
        # exit_stack.callback(conn.connect().close)
        if download_tar:
            # create an archive file to write images to
            archive = exit_stack.enter_context(
                tarfile.open('images.tar', mode='w'))
            tempdir = exit_stack.enter_context(TemporaryDirectory())

        if dataset:
            # resolve the dataset and expand it to its image IDs
            dataset_warning_id = 'Dataset-ID: {0}'.format(
                image_ids_or_dataset_id[0])
            try:
                dataset_id = int(image_ids_or_dataset_id[0])
            except ValueError:
                image_ids = None
            else:
                try:
                    dataset = conn.getObject("Dataset", dataset_id)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    # NOTE(review): on this path image_ids is never assigned,
                    # so the check below would raise NameError - confirm
                    if skip_failed:
                        warn(str(e), dataset_warning_id, warn_skip=True)
                    else:
                        raise
                else:
                    image_ids = [image.id for image in dataset.listChildren()]

            if image_ids is None:
                if skip_failed:
                    warn(
                        'Unable to find a dataset with this ID in the '
                        'database.',
                        dataset_warning_id,
                        warn_skip=True)
                else:
                    raise ValueError(
                        '{0}: Unable to find a dataset with this ID in the '
                        'database. Aborting!'.format(dataset_warning_id))
        else:
            # basic argument sanity checks and adjustments
            prefix = 'image-'
            # normalize image ids by stripping off prefix if it exists
            image_ids = [
                iid[len(prefix):] if iid[:len(prefix)] == prefix else iid
                for iid in image_ids_or_dataset_id
            ]

        for image_id in image_ids:
            image_warning_id = 'Image-ID: {0}'.format(image_id)
            try:
                image_id = int(image_id)
            except ValueError:
                image = None
            else:
                try:
                    image = conn.getObject("Image", image_id)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise

            if image is None:
                if skip_failed:
                    warn(
                        'Unable to find an image with this ID in the '
                        'database.', image_warning_id, warn_skip=True)
                    continue
                raise ValueError(
                    '{0}: Unable to find an image with this ID in the '
                    'database. Aborting!'.format(image_warning_id))

            if not download_original:
                # === region-extraction path: save a clipped plane as TIFF ===
                try:
                    # try to extract image properties
                    # if anything goes wrong here skip the image
                    # or abort.
                    image_name = os.path.splitext(image.getName())[0]
                    image_warning_id = '{0} (ID: {1})'.format(
                        image_name, image_id)

                    if region_spec == 'rectangle':
                        tile = get_clipping_region(image, *coord, width,
                                                   height)
                    elif region_spec == 'center':
                        tile = get_clipping_region(
                            image, *_center_to_ul(*coord, width, height))

                    # remember requested values so out-of-bounds adjustments
                    # can be reported below
                    ori_z, z_stack = z_stack, confine_plane(image, z_stack)
                    ori_frame, frame = frame, confine_frame(image, frame)
                    num_channels = image.getSizeC()
                    if channel is None:
                        channel_index = 0
                    else:
                        channel_index = find_channel_index(image, channel)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise

                # region sanity checks and warnings
                if tile[2] < width or tile[3] < height:
                    # The downloaded image region will have smaller dimensions
                    # than the specified width x height.
                    warn(
                        'Downloaded image dimensions ({0} x {1}) will be '
                        'smaller than the specified width and height '
                        '({2} x {3}).'
                        .format(tile[2], tile[3], width, height),
                        image_warning_id)

                # z-stack sanity checks and warnings
                if z_stack != ori_z:
                    warn(
                        'Specified image plane ({0}) is out of bounds. '
                        'Using {1} instead.'.format(ori_z, z_stack),
                        image_warning_id)

                # frame sanity checks and warnings
                if frame != ori_frame:
                    warn(
                        'Specified image frame ({0}) is out of bounds. Using '
                        'frame {1} instead.'.format(ori_frame, frame),
                        image_warning_id)

                # channel index sanity checks and warnings
                if channel is None:
                    if num_channels > 1:
                        warn(
                            'No specific channel selected for multi-channel '
                            'image. Using first of {0} channels.'.format(
                                num_channels), image_warning_id)
                else:
                    if channel_index == -1 or channel_index >= num_channels:
                        if skip_failed:
                            warn(
                                str(channel) +
                                ' is not a known channel name for this '
                                'image.',
                                image_warning_id, warn_skip=True)
                            continue
                        else:
                            raise ValueError(
                                '"{0}" is not a known channel name for '
                                'image {1}. Aborting!'.format(
                                    channel, image_warning_id))

                # download and save the region as TIFF
                fname = '__'.join([image_name, str(image_id)] +
                                  [str(x) for x in tile])
                try:
                    if fname[-5:] != '.tiff':
                        fname += '.tiff'
                    fname = fname.replace(' ', '_')
                    im_array = get_image_array(image, tile, z_stack,
                                               channel_index, frame)
                    if download_tar:
                        fname = os.path.join(tempdir, fname)
                    try:
                        tiff = TIFF.open(fname, mode='w')
                        tiff.write_image(im_array)
                    finally:
                        tiff.close()
                    # move image into tarball
                    if download_tar:
                        archive.add(fname, os.path.basename(fname))
                        os.remove(fname)
                except Exception as e:
                    if skip_failed:
                        # respect skip_failed on unexpected errors
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise
            else:
                # === original-file path: download via the OMERO CLI ===
                try:
                    # try to extract image properties
                    # if anything goes wrong here skip the image
                    # or abort.
                    image_name = os.path.splitext(image.getName())[0]
                    image_warning_id = '{0} (ID: {1})'.format(
                        image_name, image_id)
                    original_image_name = image.getFileset().listFiles(
                        )[0].getName()
                    fname = image_name + "__" + str(
                        image_id) + os.path.splitext(original_image_name)[1]
                    fname = fname.replace(' ', '_')
                    fname = fname.replace('/', '_')
                    download_directory = "./"
                    if download_tar:
                        download_directory = tempdir
                    with cli_login("-u", omero_username,
                                   "-s", omero_host,
                                   "-w", omero_password) as cli:
                        cli.invoke([
                            "download", f"Image:{image_id}",
                            download_directory
                        ])
                        if cli.rv != 0:
                            raise Exception("Download failed.")
                    # This will download to
                    # download_directory/original_image_name
                    os.rename(
                        os.path.join(download_directory, original_image_name),
                        os.path.join(download_directory, fname))
                    # move image into tarball
                    if download_tar:
                        archive.add(os.path.join(download_directory, fname),
                                    os.path.basename(fname))
                        os.remove(os.path.join(download_directory, fname))
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise
from omero.cli import cli_login

# Import a fake HCS sample into OMERO, then export the resulting object
# as OME-Zarr via the omero-cli-zarr plugin.
with cli_login("root@omero", "-womero") as cli:
    cli.loadplugins()
    # `---file` redirects the importer's result output to /tmp/import.out
    cli.invoke(["import", "/data/hcs.fake", "---file=/tmp/import.out"])
    with open("/tmp/import.out", "r") as o:
        # output is the imported object identifier written by the importer
        # (presumably e.g. 'Plate:1' - TODO confirm against import docs)
        output = o.read().strip()
    cli.invoke(["zarr", "export", output])
#!/usr/bin/env python import omero from omero.cli import cli_login from omero.rtypes import unwrap import pandas as pd import re import warnings with cli_login() as cli: client = cli.get_client() bulkcsv = 'taraoceans.BULK_ANNOTATION.csv' screenid = 1201 ignore_missing = True ns = omero.constants.namespaces.NSBULKANNOTATIONS qs = client.getSession().getQueryService() # noqa us = client.getSession().getUpdateService() # noqa df = pd.read_csv(bulkcsv) q = ('SELECT spl.child.id, spl.child.name FROM ScreenPlateLink spl ' 'WHERE spl.parent.id=%d' % screenid) rs = unwrap(qs.projection(q, None)) class PlateInfo: