def test_disable_flag(self, tmpdir):
    """Verify 'set' keeps unmentioned channels active unless --disable is given."""
    self.create_image()
    # Activate every channel and persist that as the rendering defaults.
    gateway = BlitzGateway(client_obj=self.client)
    img = gateway.getObject('Image', self.idonly)
    img.setActiveChannels([1, 2, 3, 4])
    img.saveDefaults()
    # Rendering definition that only mentions the first channel.
    rdef = {'channels': {1: {'active': True, 'start': 0, 'end': 255}}}
    rdfile = tmpdir.join('render-test.json')
    rdfile.write(json.dumps(rdef))
    self.args += ["set", self.idonly, str(rdfile)]
    self.cli.invoke(self.args, strict=True)
    # Without --disable, channels absent from the rdef stay active.
    img = gateway.getObject('Image', self.idonly)
    for ch in img.getChannels():
        assert ch.isActive()
    # With --disable, only the mentioned channel remains active.
    self.args += ["--disable"]
    self.cli.invoke(self.args, strict=True)
    img = gateway.getObject('Image', self.idonly)
    for idx, ch in enumerate(img.getChannels()):
        assert ch.isActive() == (idx == 0)
def test_canDoAction(self, func):
    """Test if canEdit returns appropriate flag."""
    client, user = self.new_client_and_user()
    image = self.make_image()
    share_id = self.create_share(
        objects=[image], description="description", experimenters=[user])
    share_svc = self.sf.getShareService()
    assert 1 == len(share_svc.getContents(share_id))
    # Member: the image is invisible until the share is activated,
    # and even then the action is denied.
    member_conn = BlitzGateway(client_obj=client)
    assert member_conn.getObject("Image", image.id.val) == None
    member_conn.SERVICE_OPTS.setOmeroShare(share_id)
    assert getattr(member_conn.getObject("Image", image.id.val), func)() == False
    # Owner: action allowed outside a share, denied once the share is active.
    owner_conn = BlitzGateway(client_obj=self.client)
    assert getattr(owner_conn.getObject("Image", image.id.val), func)() == True
    owner_conn.SERVICE_OPTS.setOmeroShare(share_id)
    assert getattr(owner_conn.getObject("Image", image.id.val), func)() == False
def test_canDoAction(self, func):
    """Test if canEdit returns appropriate flag."""
    client, user = self.new_client_and_user()
    image = self.make_image()
    share_id = self.create_share(objects=[image],
                                 description="description",
                                 experimenters=[user])
    share_service = self.sf.getShareService()
    assert 1 == len(share_service.getContents(share_id))

    def do_action(conn):
        # Reload the image through *conn* and invoke the named can* method.
        return getattr(conn.getObject("Image", image.id.val), func)()

    # Member cannot even see the image before activating the share.
    user_conn = BlitzGateway(client_obj=client)
    assert user_conn.getObject("Image", image.id.val) == None
    user_conn.SERVICE_OPTS.setOmeroShare(share_id)
    assert do_action(user_conn) == False
    # Owner may act on the object outside a share, but not inside one.
    owner_conn = BlitzGateway(client_obj=self.client)
    assert do_action(owner_conn) == True
    owner_conn.SERVICE_OPTS.setOmeroShare(share_id)
    assert do_action(owner_conn) == False
def create_containers(cli, dataset, project=None):
    """
    Creates containers with names provided if they don't exist already.
    Returns Dataset ID.
    """
    # Reuse the CLI's existing session instead of a fresh login (Python 2 code).
    sessionId = cli._event_context.sessionUuid
    conn = BlitzGateway(host='localhost')
    conn.connect(sUuid=sessionId)
    # Restrict name lookups to objects owned by the current user.
    params = omero.sys.Parameters()
    params.theFilter = omero.sys.Filter()
    params.theFilter.ownerId = wrap(conn.getUser().getId())
    d = None
    prId = None
    if project is not None:
        p = conn.getObject("Project", attributes={'name': project},
                           params=params)
        if p is None:
            print "Creating Project:", project
            p = omero.model.ProjectI()
            p.name = wrap(project)
            prId = conn.getUpdateService().saveAndReturnObject(p).id.val
        else:
            print "Using Project:", project, p
            prId = p.getId()
            # Since Project already exists, check children for Dataset
            for c in p.listChildren():
                if c.getName() == dataset:
                    d = c
    if d is None:
        # Fall back to a user-wide search by name.
        d = conn.getObject("Dataset", attributes={'name': dataset},
                           params=params)
    if d is None:
        print "Creating Dataset:", dataset
        d = omero.model.DatasetI()
        d.name = wrap(dataset)
        dsId = conn.getUpdateService().saveAndReturnObject(d).id.val
        if prId is not None:
            print "Linking Project-Dataset..."
            link = omero.model.ProjectDatasetLinkI()
            link.child = omero.model.DatasetI(dsId, False)
            link.parent = omero.model.ProjectI(prId, False)
            conn.getUpdateService().saveObject(link)
    else:
        print "Using Dataset:", dataset, d
        dsId = d.getId()
    return dsId
def main(argv):
    """Copy ROIs from a source Image/Dataset to a target on another server."""
    parser = argparse.ArgumentParser()
    parser.add_argument('username2', help='Target server Username')
    parser.add_argument('password2', help='Target server Password')
    parser.add_argument('server2', help='Target server')
    parser.add_argument('source',
                        help=('Copy ROIs FROM this: Image:ID or Dataset:ID'))
    parser.add_argument('target',
                        help=('Copy ROIs TO this: Image:ID or Dataset:ID'))
    args = parser.parse_args(argv)
    with cli_login() as cli:
        conn = BlitzGateway(client_obj=cli._client)
        conn.SERVICE_OPTS.setOmeroGroup(-1)
        conn2 = BlitzGateway(args.username2, args.password2,
                             port=4064, host=args.server2)
        conn2.connect()
        src_images = []
        dst_image_ids = []
        src_id = int(args.source.split(":")[1])
        dst_id = int(args.target.split(":")[1])
        if args.source.startswith('Image:'):
            # Single image: copy straight onto the given target image.
            src_images.append(conn.getObject('Image', src_id))
            dst_image_ids.append(dst_id)
        elif args.source.startswith('Dataset:'):
            # Dataset: pair source and target images by matching name.
            dataset = conn.getObject('Dataset', src_id)
            target_dataset = conn2.getObject('Dataset', dst_id)
            ids_by_name = image_ids_by_name(target_dataset)
            for image in dataset.listChildren():
                if image.name in ids_by_name:
                    src_images.append(image)
                    dst_image_ids.append(ids_by_name[image.name])
        else:
            print("Source needs to be Image:ID or Dataset:ID")
        print("Processing", src_images)
        print("...to target images:", dst_image_ids)
        for image, to_target_id in zip(src_images, dst_image_ids):
            process_image(conn, conn2, image, to_target_id)
        conn2.close()
def get_coordinates_list(index_ligne, imageId, large, delta_x, z,
                         username='tbacoyannis', password='d33pl34rn1ng',
                         host='chinensis.polytechnique.fr', port=4064):
    """
    Return window-centre coordinates for one row of windows across an image.

    :param index_ligne: 0-based row index of the window row
    :param imageId: OMERO Image id used to obtain the image width
    :param large: unused (kept for interface compatibility)
    :param delta_x: window step/size in pixels
    :param z: unused (kept for interface compatibility)
    :return: tuple (x, y, index_lignee, index_colonne) of numpy arrays

    SECURITY NOTE(review): the connection credentials were hard-coded; they
    are now overridable keyword parameters (defaults preserved for backward
    compatibility) but should be removed from source entirely.
    """
    conn = BlitzGateway(username, password, port=port, host=host)
    conn.connect()
    try:
        # Only the image width is needed; the other size reads were unused.
        image = conn.getObject("Image", imageId)
        size_x = image.getSizeX()
    finally:
        # BUG FIX: close the session even if the lookup above fails.
        conn._closeSession()
    nb_fenetres = np.arange(size_x / delta_x)
    x = (nb_fenetres * delta_x) + delta_x
    y = ((index_ligne * delta_x) + delta_x) * np.ones(len(x))
    index_colonne = nb_fenetres + 2
    index_lignee = (2 + index_ligne) * np.ones(len(x))
    return (x, y, index_lignee, index_colonne)
def list_children(name, ignore):
    """Return 'Plate:<id>' strings for every plate in the named idr0072 screen."""
    screen_name = "idr0072-schormann-subcellref/" + name
    with cli_login() as cli:
        conn = BlitzGateway(client_obj=cli.get_client())
        screen = conn.getObject('Screen', attributes={'name': screen_name})
        plates = []
        for plate in screen.listChildren():
            plates.append(f"Plate:{plate.id}")
        return plates
def run(username, password, idr_obj, local_obj, host, port):
    """Annotate a local Project/Screen/Plate with data from its IDR twin."""
    conn = BlitzGateway(username, password, host=host, port=port)
    try:
        conn.connect()
        # Both arguments must look like e.g. Project:1
        if ':' not in idr_obj or ':' not in local_obj:
            print('Objects must be: %s' % obj_help)
            return
        dtype = idr_obj.split(':')[0]
        idr_id = idr_obj.split(':')[1]
        localtype = local_obj.split(':')[0]
        if dtype != localtype:
            print("Object types must match")
            return
        local_id = local_obj.split(':')[1]
        # Dispatch on the (shared) object type.
        if dtype == 'Project':
            annotate_project(conn, local_id, idr_id)
        elif dtype == 'Plate':
            plate = conn.getObject('Plate', local_id)
            annotate_plate(conn, plate, idr_id)
        elif dtype == 'Screen':
            annotate_screen(conn, local_id, idr_id)
        else:
            print("Only supports Project, Screen or Plate")
    finally:
        conn.close()
def upload_image_from_file(file, dataset_name, project_name='',
                           attachments=(), wait=-1):
    """Import an image file into a dataset; optionally attach extra files.

    Returns the new Image id, or None if the import produced no result.
    """
    attachments = list(attachments)
    dataset_id = get_or_create_dataset_id(dataset_name, project_name)
    with cli_login(*LOGIN_ARGS) as cli:
        conn = BlitzGateway(client_obj=cli._client)
        r = file_import(cli._client, file, wait)
        if r:
            # TODO - doing this as iterable fileset for single file is weird
            image_id = r.pixels[0].image.id.val
            logger.debug('Imported Image ID: %d' % image_id)
            ds_link = omero.model.DatasetImageLinkI()
            ds_link.parent = omero.model.DatasetI(dataset_id, False)
            ds_link.child = omero.model.ImageI(image_id, False)
            conn.getUpdateService().saveArray([ds_link], conn.SERVICE_OPTS)
            if attachments:
                # have to have loadedness -> True to link an annotation
                image = conn.getObject("Image", image_id)
                for attachment in attachments:
                    # TODO - add guess_mimetype / namespace here
                    upload_file_annotation(conn, image, attachment,
                                           namespace='pyme.localizations')
            return image_id
def get_file_contents(client, original_file_id):
    """Returns Original File contents as a string."""
    conn = BlitzGateway(client_obj=client)
    chunks = conn.getObject("OriginalFile", original_file_id).getFileInChunks()
    contents = "".join(chunks)
    conn.close()
    return contents
def main():
    """Export a 2-channel hyperstack crop centred on an ellipse ROI.

    Uses module-level globals (username, password, server, imageId, roi_num,
    width, height, max_ch, slc_ch, slc) — presumably set by the caller;
    TODO(review): confirm where these are defined.
    """
    # BUG FIX: bind conn before the try block; previously a failure in the
    # BlitzGateway constructor raised NameError inside the finally clause.
    conn = None
    try:
        conn = BlitzGateway(username, password, host=server, port=4064)
        conn.connect()
        # For loading ROIs
        updateService = conn.getUpdateService()
        from omero.rtypes import rdouble, rint, rstring
        image = conn.getObject("Image", imageId)
        roi_service = conn.getRoiService()
        result = roi_service.findByImage(imageId, None)
        roi = result.rois[roi_num - 1]
        print("ROI: ID:", roi.getId().getValue())
        for s in roi.copyShapes():
            # Only ellipse shapes define a crop centre.
            if type(s) == omero.model.EllipseI:
                ctr_x = s.getX().getValue()
                ctr_y = s.getY().getValue()
                x, y, w, h = rect_roi_frm_ctr(image, ctr_x, ctr_y,
                                              width, height)
                MAX_GFP = max_proj(image, max_ch, x, y, w, h)
                BFZ9 = load_stack_plane(image, slc_ch, slc, x, y, w, h)
                hstk = np.stack((MAX_GFP, BFZ9), axis=0)
                tmp_str = ('output_' + str(imageId) + '_' +
                           str(roi.getId().getValue()) + '.tif')
                output_hyperstack(hstk, tmp_str)
    finally:
        if conn:
            conn.close()
def main(argv):
    """Write a TSV mapping plate/well/field to image and well IDs (Python 2)."""
    parser = make_parser()
    args = parser.parse_args(argv[1:])
    if not args.out_file:
        args.out_file = "map_screen_%d.tsv" % args.screen_id
    passwd = getpass.getpass()
    conn = BlitzGateway(
        args.user, passwd, host=args.host, port=args.port, group=args.group
    )
    conn.connect()
    screen = conn.getObject("Screen", args.screen_id)
    print "writing to %s" % args.out_file
    print "SCREEN: %s" % screen.name
    with open(args.out_file, "w") as fo:
        fo.write("PLATE\tSERIES\tWELL\tFIELD\tImageID\tWellID\n")
        for p in screen.listChildren():
            rows = []
            print " plate: %s" % p.name
            for w in p.listChildren():
                # Count fields by exhausting the child iterator.
                n_fields = sum(1 for _ in w.listChildren())
                for i in xrange(n_fields):
                    img = w.getImage(i)
                    # e.g. row 0 / column 0 -> "A01"
                    well_tag = "%s%02d" % (LETTERS[w.row], w.column + 1)
                    rows.append((
                        p.name, img.getSeries(), well_tag, i + 1, img.id, w.id
                    ))
            # Sort by series first, then lexicographically by the full tuple.
            rows.sort(key=itemgetter(1))
            rows.sort()
            for r in rows:
                fo.write("%s\t%d\t%s\t%d\t%d\t%d\n" % r)
def testFakeImport(self):
    """Original metadata of a fake import is empty but well-formed."""
    # TODO: should likely be in the "fs" namespace
    client = self.new_client()
    rsp = self.fullImport(client)  # Note: fake test produces no metadata!
    image_id = rsp.objects[0].id.val
    req = omero.cmd.OriginalMetadataRequest()
    req.imageId = image_id
    gateway = BlitzGateway(client_obj=client)
    # Load via the gateway wrapper first ...
    image = gateway.getObject("Image", image_id)
    assert len(image.loadOriginalMetadata()) == 3
    # ... then via the raw command request.
    handle = client.sf.submit(req)
    try:
        gateway._waitOnCmd(handle, failonerror=True)
        rsp = handle.getResponse()
        assert type(rsp.globalMetadata) is dict
        assert type(rsp.seriesMetadata) is dict
    finally:
        handle.close()
def testEditSingleC(self, targetName, greyscale, tmpdir): sizec = 1 # 1 channel so should default to greyscale model expected_greyscale = ((greyscale is None) or greyscale) self.create_image(sizec=sizec) rd = self.get_render_def(sizec=sizec, greyscale=greyscale) rdfile = tmpdir.join('render-test-editsinglec.json') # Should work with json and yaml, but yaml is an optional dependency rdfile.write(json.dumps(rd)) target = getattr(self, targetName) self.args += ["edit", target, str(rdfile)] self.cli.invoke(self.args, strict=True) iids = self.get_target_imageids(target) print 'Got %d images' % len(iids) gw = BlitzGateway(client_obj=self.client) for iid in iids: # Get the updated object img = gw.getObject('Image', iid) # Note: calling _prepareRE below does NOT suffice! img._prepareRenderingEngine() # Call *before* getChannels # Passing noRE to getChannels below also prevents leaking # the RenderingEngine but then Nones are returned later. channels = img.getChannels() assert len(channels) == sizec for c in xrange(len(channels)): self.assert_channel_rdef(channels[c], rd['channels'][c + 1]) self.assert_image_rmodel(img, expected_greyscale) img._closeRE() assert not gw._assert_unregistered("testEditSingleC")
def check_file_annotation(client, file_annotation, parent_type="Image",
                          is_linked=True, file_name=None):
    """
    Check validity of file annotation. If hasFileAnnotation, check the size,
    name and number of objects linked to the original file.
    """
    assert file_annotation is not None
    orig_file = file_annotation.getValue().getFile()
    assert orig_file.getSize().getValue() > 0
    assert orig_file.getName().getValue() is not None
    ann_id = file_annotation.getValue().getId().getValue()
    assert ann_id > 0
    conn = BlitzGateway(client_obj=client)
    wrapper = conn.getObject("FileAnnotation", ann_id)
    stored_name = wrapper.getFile().getName() if file_name is not None else None
    link_total = sum(1 for _ in wrapper.getParentLinks(parent_type))
    conn.close()
    expected_links = 1 if is_linked else 0
    assert link_total == expected_links
    if file_name is not None:
        assert stored_name == file_name
def testEdit(self, targetName, tmpdir):
    """Edit rendering settings on a 4-channel image via the CLI.

    With four channels the rendering model should default to colour
    (Python 2 code).
    """
    sizec = 4
    greyscale = None
    # 4 channels so should default to colour model
    expected_greyscale = False
    self.create_image(sizec=sizec)
    rd = self.get_render_def(sizec=sizec, greyscale=greyscale)
    rdfile = tmpdir.join('render-test-edit.json')
    # Should work with json and yaml, but yaml is an optional dependency
    rdfile.write(json.dumps(rd))
    target = getattr(self, targetName)
    self.args += ["edit", target, str(rdfile)]
    self.cli.invoke(self.args, strict=True)
    iids = self.get_target_imageids(target)
    print 'Got %d images' % len(iids)
    gw = BlitzGateway(client_obj=self.client)
    for iid in iids:
        # Get the updated object
        img = gw.getObject('Image', iid)
        channels = img.getChannels()
        assert len(channels) == sizec
        for c in xrange(len(channels)):
            self.assert_channel_rdef(channels[c], rd['channels'][c + 1])
        self.assert_image_rmodel(img, expected_greyscale)
        img._closeRE()
    # Verify no rendering engines were leaked.
    assert not gw._assert_unregistered("testEdit")
def run(password, project_name, dataset_name, host, port):
    """Create a Project per trainer user and link their existing Dataset to it.

    NOTE(review): the username template below was redacted to "******" in
    this source — restore it from version control.
    """
    for user_number in range(1, 41):
        username = "******" % user_number
        print(username)
        conn = BlitzGateway(username, password, host=host, port=port)
        try:
            conn.connect()
            new_project = ProjectI()
            new_project.setName(rstring(project_name))
            update_service = conn.getUpdateService()
            new_project = update_service.saveAndReturnObject(new_project)
            dataset = conn.getObject("Dataset",
                                     attributes={'name': dataset_name},
                                     opts={'owner': conn.getUserId()})
            if dataset is None:
                print("No dataset with name %s found" % dataset_name)
                continue
            dataset_id = dataset.getId()
            print(username, dataset_id)
            # Link the freshly created project to the user's dataset.
            link = ProjectDatasetLinkI()
            link.setParent(ProjectI(new_project.getId().getValue(), False))
            link.setChild(DatasetI(dataset_id, False))
            conn.getUpdateService().saveObject(link)
        except Exception as exc:
            print("Error while creating project: %s" % str(exc))
        finally:
            conn.close()
def run(datasetname, password, host, port):
    """Collect tag links and ratings from trainer-1's dataset, then re-link."""
    conn = BlitzGateway('trainer-1', password, host=host, port=port)
    conn.connect()
    try:
        trainer_dataset = conn.getObject(
            "Dataset", attributes={'name': datasetname},
            opts={'owner': conn.getUserId()})
        # Build {image name: [tag ids]} and {image name: rating}.
        image_tag_links = {}
        image_ratings = {}
        for image in trainer_dataset.listChildren():
            tag_ids = []
            for ann in image.listAnnotations():
                if ann.OMERO_TYPE.__name__ == 'TagAnnotationI':
                    tag_ids.append(ann.id)
                elif ann.ns == RATING_NS:
                    image_ratings[image.getName()] = ann.longValue
            if tag_ids:
                image_tag_links[image.getName()] = tag_ids
        print('image_ratings', image_ratings)
        link_tags(conn, datasetname, image_tag_links, image_ratings)
    finally:
        conn.close()
def test_numpy_to_image(self):
    """numpy_to_image converts each channel's plane into a loadable image."""
    client = self.new_client()
    image = self.create_test_image(100, 100, 2, 3, 4, client.getSession())
    conn = BlitzGateway(client_obj=client)
    image = conn.getObject("Image", image.id.val)
    pixels = image.getPrimaryPixels()
    channel_min_max = [(c.getWindowMin(), c.getWindowMax())
                       for c in image.getChannels()]
    # BUG FIX: use integer division — getPlane needs an int z-index and
    # '/' yields a float under Python 3.
    z = image.getSizeZ() // 2
    t = 0
    try:
        # enumerate replaces the manual 'c += 1' counter.
        for c, min_max in enumerate(channel_min_max):
            plane = pixels.getPlane(z, c, t)
            i = scriptUtil.numpy_to_image(plane, min_max, int32)
            assert i is not None
            try:
                # check if the image can be handled.
                i.load()
            except IOError:
                assert False
    finally:
        conn.close()
def main(argv):
    """Write a TSV mapping plate/well/field to image and well IDs (Python 2)."""
    parser = make_parser()
    args = parser.parse_args(argv[1:])
    if not args.out_file:
        args.out_file = "map_screen_%d.tsv" % args.screen_id
    passwd = getpass.getpass()
    conn = BlitzGateway(args.user, passwd, host=args.host, port=args.port,
                        group=args.group)
    conn.connect()
    screen = conn.getObject("Screen", args.screen_id)
    print "writing to %s" % args.out_file
    print "SCREEN: %s" % screen.name
    with open(args.out_file, "w") as fo:
        fo.write("PLATE\tSERIES\tWELL\tFIELD\tImageID\tWellID\n")
        for p in screen.listChildren():
            rows = []
            print " plate: %s" % p.name
            for w in p.listChildren():
                # Count fields by exhausting the child iterator.
                n_fields = sum(1 for _ in w.listChildren())
                for i in xrange(n_fields):
                    img = w.getImage(i)
                    # e.g. row 0 / column 0 -> "A01"
                    well_tag = "%s%02d" % (LETTERS[w.row], w.column + 1)
                    rows.append((p.name, img.getSeries(), well_tag, i + 1,
                                 img.id, w.id))
            # Sort by series first, then lexicographically by the full tuple.
            rows.sort(key=itemgetter(1))
            rows.sort()
            for r in rows:
                fo.write("%s\t%d\t%s\t%d\t%d\t%d\n" % r)
def run(username, password, project_id, host, port):
    """Rename channels of every image in a project using its map annotation.

    Reads the NAMESPACE map annotation on each image, extracts the MAP_KEY
    value (e.g. "0:DAPI; 1:GFP") and applies the names via setChannelNames.
    """
    conn = BlitzGateway(username, password, host=host, port=port)
    try:
        conn.connect()
        project = conn.getObject("Project", project_id)
        for dataset in project.listChildren():
            print("\n\nDataset", dataset.id, dataset.name)
            for image in dataset.listChildren():
                print("Image", image.id, image.name)
                ann = image.getAnnotation(NAMESPACE)
                if ann is None:
                    print(" No annotation found")
                    continue
                keys = ann.getValue()
                values = [kv[1] for kv in keys if kv[0] == MAP_KEY]
                if len(values) == 0:
                    print(" No Key-Value found for key:", MAP_KEY)
                    # BUG FIX: without this 'continue', values[0] below
                    # raised IndexError for images lacking the key.
                    continue
                channels = values[0].split("; ")
                print("Channels", channels)
                name_dict = {}
                for c, ch_name in enumerate(channels):
                    name_dict[c + 1] = ch_name.split(":")[1]
                conn.setChannelNames("Image", [image.id], name_dict,
                                     channelCount=None)
    except Exception as exc:
        print("Error while changing names: %s" % str(exc))
    finally:
        conn.close()
def check(obj):
    """Compare a remote object's description and map annotations against the
    expected values carried on *obj*, logging any mismatch (Python 2 code)."""
    from omero.cli import CLI
    from omero.gateway import BlitzGateway
    cli = CLI()
    cli.loadplugins()
    cli.onecmd('login')
    try:
        gateway = BlitzGateway(client_obj=cli.get_client())
        # Look the object up by type and name on the server.
        remote_obj = gateway.getObject(
            obj.type, attributes={"name": obj.name})
        errors = []
        if remote_obj.description != obj.description:
            errors.append("current:%s\nexpected:%s" % (
                remote_obj.description, obj.description))
        # Compare key/value pairs of every client map annotation, in order.
        for al in remote_obj._getAnnotationLinks(
                ns="openmicroscopy.org/omero/client/mapAnnotation"):
            mapValue = al.child.mapValue
            kv_pairs = [(m.name, m.value) for m in mapValue]
            for i in range(len(kv_pairs)):
                if kv_pairs[i] != obj.map[i]:
                    errors.append(
                        "current:%s\nexpected:%s" % (kv_pairs[i], obj.map[i]))
        if not errors:
            log.info("No annotations mismatch detected")
        else:
            for e in errors:
                log.info("Found some annotations mismatch")
                print e
    finally:
        # NOTE(review): nesting reconstructed from a collapsed source line;
        # gateway.close() presumably belongs under the same guard — confirm.
        if cli:
            cli.close()
            gateway.close()
def run(password, target, host, port):
    """Rename the first two channels of each trainer user's target dataset.

    NOTE(review): the username template below was redacted to "******" in
    this source — restore it from version control.
    """
    for user_number in range(1, 51):
        username = "******" % user_number
        print(username)
        conn = BlitzGateway(username, password, host=host, port=port)
        try:
            conn.connect()
            dataset = conn.getObject("Dataset", attributes={'name': target},
                                     opts={'owner': conn.getUserId()})
            if dataset is None:
                print("No dataset with name %s found" % target)
                continue
            print("Dataset", dataset.getId())
            new_names = {1: "H2B", 2: "nuclear lamina"}
            conn.setChannelNames("Dataset", [dataset.getId()], new_names)
        except Exception as exc:
            print("Error while changing the channel names: %s" % str(exc))
        finally:
            # Close connection for each user when done
            conn.close()
def testFakeImport(self):
    """Check OriginalMetadataRequest against a fake import (no metadata)."""
    # TODO: should likely be in the "fs" namespace
    client = self.new_client()
    rsp = self.full_import(client)  # Note: fake test produces no metadata!
    image = rsp.objects[0]
    request = omero.cmd.OriginalMetadataRequest()
    request.imageId = image.id.val
    gateway = BlitzGateway(client_obj=client)
    # Load via the gateway
    wrapped = gateway.getObject("Image", image.id.val)
    assert len(wrapped.loadOriginalMetadata()) == 3
    # Load via raw request
    handle = client.sf.submit(request)
    try:
        gateway._waitOnCmd(handle, failonerror=True)
        response = handle.getResponse()
        assert type(response.globalMetadata) is dict
        assert type(response.seriesMetadata) is dict
    finally:
        handle.close()
def checkFileAnnotation(self, fileAnnotation, hasFileAnnotation=True,
                        parentType="Image", isLinked=True, client=None):
    """
    Check validity of file annotation. If hasFileAnnotation, check the size,
    name and number of objects linked to the original file.
    """
    if not hasFileAnnotation:
        assert fileAnnotation is None
        return
    assert fileAnnotation is not None
    assert fileAnnotation.val._file._size._val > 0
    assert fileAnnotation.val._file._name._val is not None
    # Fall back to the root client when none is supplied.
    if client is None:
        client = self.root
    conn = BlitzGateway(client_obj=client)
    faWrapper = conn.getObject("FileAnnotation", fileAnnotation.val.id.val)
    nLinks = sum(1 for _ in faWrapper.getParentLinks(parentType))
    assert nLinks == (1 if isLinked else 0)
def run(name, password, dataset_name, dataset_id, host, port):
    """Delete every ROI on every image in the selected dataset(s)."""
    conn = BlitzGateway(name, password, host=host, port=port)
    try:
        conn.connect()
        roi_service = conn.getRoiService()
        # A non-negative id selects one dataset; otherwise match by name.
        if dataset_id >= 0:
            datasets = [conn.getObject("Dataset", dataset_id)]
        else:
            datasets = conn.getObjects("Dataset",
                                       attributes={"name": dataset_name})
        for dataset in datasets:
            print(dataset.getId())
            for image in dataset.listChildren():
                result = roi_service.findByImage(image.getId(), None,
                                                 conn.SERVICE_OPTS)
                if result is None:
                    continue
                roi_ids = [roi.id.val for roi in result.rois]
                if roi_ids:
                    print("Deleting %s ROIs..." % len(roi_ids))
                    conn.deleteObjects("Roi", roi_ids, wait=True)
    except Exception as exc:
        print("Error while deleting Rois: %s" % str(exc))
    finally:
        conn.close()
def create_containers(cli, dataset, project=None):
    """
    Creates containers with names provided if they don't exist already.
    Returns Dataset ID.
    """
    # Reuse the CLI's existing session instead of a fresh login (Python 2 code).
    sessionId = cli._event_context.sessionUuid
    conn = BlitzGateway()
    conn.connect(sUuid = sessionId)
    # Restrict name lookups to objects owned by the current user.
    params = omero.sys.Parameters()
    params.theFilter = omero.sys.Filter()
    params.theFilter.ownerId = wrap(conn.getUser().getId())
    d = None
    prId = None
    if project is not None:
        p = conn.getObject("Project", attributes={'name': project},
                           params=params)
        if p is None:
            print "Creating Project:", project
            p = omero.model.ProjectI()
            p.name = wrap(project)
            prId = conn.getUpdateService().saveAndReturnObject(p).id.val
        else:
            print "Using Project:", project, p
            prId = p.getId()
            # Since Project already exists, check children for Dataset
            for c in p.listChildren():
                if c.getName() == dataset:
                    d = c
    if d is None:
        # Fall back to a user-wide search by name.
        d = conn.getObject("Dataset", attributes={'name': dataset},
                           params=params)
    if d is None:
        print "Creating Dataset:", dataset
        d = omero.model.DatasetI()
        d.name = wrap(dataset)
        dsId = conn.getUpdateService().saveAndReturnObject(d).id.val
        if prId is not None:
            print "Linking Project-Dataset..."
            link = omero.model.ProjectDatasetLinkI()
            link.child = omero.model.DatasetI(dsId, False)
            link.parent = omero.model.ProjectI(prId, False)
            conn.getUpdateService().saveObject(link)
    else:
        print "Using Dataset:", dataset, d
        dsId = d.getId()
    return dsId
def _lookup(conn: BlitzGateway, proxy_obj): """Find object of type by ID.""" conn.SERVICE_OPTS.setOmeroGroup("-1") type_ = proxy_obj.__class__.__name__.rstrip("I") obj = conn.getObject(type_, proxy_obj.id) if not obj: raise FileNotFoundError(f"No such {type_}: {proxy_obj.id}") return obj
def test_dataset_to_plate(self, image_names):
    """Run the dataset_to_plate script and verify the well/image layout."""
    script_id = super(TestUtilScripts, self).get_script(dataset_to_plate)
    assert script_id > 0
    # root session is root.sf
    session = self.root.sf
    client = self.root
    # create several test images in a dataset
    dataset = self.make_dataset("dataset_to_plate-test", client=client)
    # Images will be sorted by name and assigned Column first
    image_ids = []
    for i in image_names:
        # x,y,z,c,t
        image = self.create_test_image(100, 100, 1, 1, 1, session, name=i)
        self.link(dataset, image, client=client)
        image_ids.append(image.id.val)
    # Minimum args.
    dataset_ids = [omero.rtypes.rlong(dataset.id.val)]
    args = {
        "Data_Type": wrap("Dataset"),
        "IDs": wrap(dataset_ids),
        "First_Axis_Count": wrap(3),
    }
    # With more image names, add extra args
    images_per_well = 1
    if (len(image_names)) == 8:
        images_per_well = 2
        args["Images_Per_Well"] = wrap(images_per_well)
        args["First_Axis_Count"] = wrap(2)
        args["Column_Names"] = wrap("letter")
        args["Row_Names"] = wrap("number")
    d_to_p = run_script(client, script_id, args, "New_Object")
    # check the result - load all Wells from Plate, check image IDs
    assert d_to_p is not None
    plate_id = d_to_p.getValue().id.val
    # Check names of Images matches Well position
    images_in_plate = []
    conn = BlitzGateway(client_obj=client)
    plate = conn.getObject("Plate", plate_id)
    for well in plate.listChildren():
        print('well', well)
        for w in range(images_per_well):
            if images_per_well == 1:
                name = well.getWellPos()
            else:
                # e.g. "A1_0"
                name = "%s_%d" % (well.getWellPos(), w)
            image = well.getImage(w)
            assert image.getName() == name
            images_in_plate.append(image.getId())
    # and all images were in the Plate
    images_in_plate.sort()
    assert images_in_plate == image_ids
def lookup_obj(conn: "BlitzGateway", iobj: "IObject") -> "BlitzObjectWrapper":
    """Find object of type by ID.

    :param conn: an open gateway connection (annotations quoted so the module
        does not require the names at definition time)
    :param iobj: an omero.model object whose class name (minus the trailing
        'I') gives the object type
    :raises NameError: if no such object is visible
    """
    conn.SERVICE_OPTS.setOmeroGroup("-1")
    cls_name = iobj.__class__.__name__
    # BUG FIX: rstrip("I") removes *all* trailing 'I' characters, mangling
    # model classes such as RoiI -> "Ro"; strip exactly one suffix char.
    type_ = cls_name[:-1] if cls_name.endswith("I") else cls_name
    obj = conn.getObject(type_, iobj.id)
    if not obj:
        raise NameError(f"No such {type_}: {iobj.id}")
    return obj
def test_get_line_data(self):
    """get_line_data returns one value per requested line point."""
    client = self.new_client()
    created = self.create_test_image(100, 100, 1, 1, 1, client.getSession())
    conn = BlitzGateway(client_obj=client)
    image = conn.getObject("Image", created.id.val)
    pixels = image.getPrimaryPixels()
    line = get_line_data(pixels, 10, 0, 10, 20, 10)
    assert len(line) == 10
def test_chgrp_new_container(self, dataset, credentials):
    """
    Performs a chgrp POST, polls the activities json till done, then checks
    that Dataset has moved to new group and has new Project as parent.
    (Python 2 code.)
    """
    django_client = self.get_django_client(credentials)
    request_url = reverse('chgrp')
    projectName = "chgrp-project%s" % (self.uuid())
    data = {
        "group_id": self.group2.id.val,
        "Dataset": dataset.id.val,
        "new_container_name": projectName,
        "new_container_type": "project",
    }
    # Request the move and verify the predicted tree update.
    data = _csrf_post_response_json(django_client, request_url, data)
    expected = {"update": {"childless": {"project": [],
                                         "orphaned": False,
                                         "dataset": []},
                           "remove": {"project": [],
                                      "plate": [],
                                      "screen": [],
                                      "image": [],
                                      "dataset": [dataset.id.val]}}}
    assert data == expected
    activities_url = reverse('activities_json')
    data = _get_response_json(django_client, activities_url, {})
    # Keep polling activities until no jobs in progress
    while data['inprogress'] > 0:
        time.sleep(0.5)
        data = _get_response_json(django_client, activities_url, {})
    # individual activities/jobs are returned as dicts within json data
    for k, o in data.items():
        if hasattr(o, 'values'):
            # a dict
            if 'report' in o:
                print o['report']
            assert o['status'] == 'finished'
            assert o['job_name'] == 'Change group'
            assert o['to_group_id'] == self.group2.id.val
    # Dataset should now be in new group, contained in new Project
    conn = BlitzGateway(client_obj=self.client)
    userId = conn.getUserId()
    conn.SERVICE_OPTS.setOmeroGroup('-1')
    d = conn.getObject("Dataset", dataset.id.val)
    assert d is not None
    assert d.getDetails().group.id.val == self.group2.id.val
    p = d.getParent()
    assert p is not None
    assert p.getName() == projectName
    # Project owner should be current user
    assert p.getDetails().owner.id.val == userId
def ls(self, args): """List all the original files contained in a fileset""" client = self.ctx.conn(args) gateway = BlitzGateway(client_obj=client) gateway.SERVICE_OPTS.setOmeroGroup("-1") fileset = gateway.getObject("Fileset", args.fileset.id.val) defaultdict(list) for ofile in fileset.listFiles(): print ofile.path + ofile.name
def run(password, admin_name, target, tag, host, port):
    """For each trainer user, link the admin's tag to selected images.

    NOTE(review): the username template below was redacted to "******" in
    this source — restore it from version control. The HQL strings were
    reflowed from backslash continuations; whitespace inside them may
    differ from the original (HQL is whitespace-insensitive).
    """
    for i in range(1, 51):
        username = "******" % i
        print(username)
        conn = BlitzGateway(username, password, host=host, port=port)
        try:
            conn.connect()
            updateService = conn.getUpdateService()
            dataset = conn.getObject("Dataset", attributes={'name': target},
                                     opts={'owner': conn.getUserId()})
            if dataset is None:
                print("No dataset with name %s found" % target)
                continue
            # Look up the tag owned by the named admin user.
            params = omero.sys.ParametersI()
            params.addString('username', admin_name)
            query = ("from TagAnnotation where textvalue='%s' "
                     "AND details.owner.omeName=:username" % tag)
            query_service = conn.getQueryService()
            tags = query_service.findAllByQuery(query, params,
                                                conn.SERVICE_OPTS)
            if len(tags) == 0:
                print("No tag with name %s found" % tag)
                continue
            tag_id = tags[0].id.getValue()
            print(tag_id)
            links = []
            for image in dataset.listChildren():
                name = image.getName()
                if name not in images_to_tag:
                    continue
                # Skip images that already carry the tag.
                params = omero.sys.ParametersI()
                params.addLong('parent', image.id)
                params.addLong('child', tag_id)
                link_query = ("select link from ImageAnnotationLink as link "
                              "where link.parent.id=:parent "
                              "AND link.child.id=:child")
                existing = query_service.findAllByQuery(
                    link_query, params, conn.SERVICE_OPTS)
                if len(existing) == 0:
                    link = ImageAnnotationLinkI()
                    link.parent = ImageI(image.id, False)
                    link.child = TagAnnotationI(tag_id, False)
                    links.append(link)
                else:
                    print("Tag %s already linked to %s" % (tag, name))
            if links:
                updateService.saveArray(links)
        except Exception as exc:
            print("Error when tagging the images: %s" % str(exc))
        finally:
            conn.close()
def setup_method(self, method):
    """Import one image carrying global metadata and wrap it for the tests."""
    super(MetadataTestBase, self).setup_method(method)
    self.name = self.uuid()
    self.image = self.importSingleImage(
        GlobalMetadata={'gmd-' + self.name: 'gmd-' + self.name})
    conn = BlitzGateway(client_obj=self.client)
    self.imageid = unwrap(self.image.getId())
    # Python 2 code: image ids unwrap to 'long' integers.
    assert type(self.imageid) == long
    wrapper = conn.getObject("Image", self.imageid)
    self.md = Metadata(wrapper)
def check_file_annotation(client, file_annotation, parent_type="Image",
                          link_count=0):
    """
    Check validity of file annotation. If hasFileAnnotation, check the size,
    name and number of objects linked to the original file.
    """
    assert file_annotation is not None
    ann_id = file_annotation.getId().getValue()
    assert ann_id > 0
    original_file = file_annotation.getFile()
    assert original_file.getSize().getValue() > 0
    assert original_file.getName().getValue() is not None
    conn = BlitzGateway(client_obj=client)
    wrapper = conn.getObject("FileAnnotation", ann_id)
    parent_links = sum(1 for _ in wrapper.getParentLinks(parent_type))
    conn.close()
    assert parent_links == link_count
def checkFileAnnotation(self, fileAnnotation, hasFileAnnotation=True,
                        parentType="Image", isLinked=True, client=None):
    """
    Check validity of file annotation. If hasFileAnnotation, check the size,
    name and number of objects linked to the original file.
    """
    if not hasFileAnnotation:
        self.assertEqual(fileAnnotation, None)
        return
    self.assertNotEqual(fileAnnotation, None)
    self.assertTrue(fileAnnotation.val._file._size._val > 0)
    self.assertNotEqual(fileAnnotation.val._file._name._val, None)
    # Fall back to the root client when none is supplied.
    if client is None:
        client = self.root
    conn = BlitzGateway(client_obj=client)
    faWrapper = conn.getObject("FileAnnotation", fileAnnotation.val.id.val)
    nLinks = sum(1 for _ in faWrapper.getParentLinks(parentType))
    self.assertEqual(nLinks, 1 if isLinked else 0)
def user_exists(self, user):
    """Return True if *user* exists on the server, caching known users.

    NOTE(review): this block was mangled by credential redaction — the
    "******" runs below replaced string/argument text and fused a print
    statement with the BlitzGateway("root", ...) construction, and the
    'try:' opening the except/finally below was lost. Restore the original
    from version control; the code as written here cannot parse.
    (Python 2 code.)
    """
    # Already-seen users short-circuit without a server round-trip.
    if user in self.known_users.keys():
        print "User:"******"root", "omero", host='localhost')
        conn.connect()
        params = omero.sys.Parameters()
        params.theFilter = omero.sys.Filter()
        u = conn.getObject("Experimenter", attributes={'omeName': user},
                           params=params)
    except:
        print "Error getting user - ignoring."
        return False
    if u is None:
        print "User:"******"does not exist - ignoring."
        return False
    else:
        print "User:", user,
        self.known_users[user] = []
        return True
    finally:
        # seppuku() is the (deprecated) client shutdown call.
        conn.seppuku()
def copy(client, copy_from, copy_type, copy_to): gateway = BlitzGateway(client_obj=client) print gateway.applySettingsToSet(copy_from, copy_type, [copy_to]) gateway.getObject("Image", copy_to).getThumbnail(size=(96,), direct=False)
sys.exit(1) else: print "Login successful.\n" # Set our default group so we can see the data. # (I thought I had to do this, but now I am not sure it's needed. # -JLM 2013/12/04) try: group = next(g for g in conn.listGroups() if g.getName() == GROUP) except StopIteration: print >> sys.stderr, "Error: could not find group '%s'" % GROUP conn.setGroupForSession(group.getId()) # Get plate of interest. # (ID 1552 is "RTK ligands induce differing FOXO3a translocation dynamics") plate = conn.getObject("Plate", 1552) # Get list of lists of well objects from the plate. well_grid = plate.getWellGrid() # Loop over all wells in the plate. for (raw_row_num, row) in enumerate(well_grid): for (raw_col_num, well) in enumerate(row): # Fix up row and column numbers to match Pat's nomenclature. row_num = raw_row_num + 1 col_num = raw_col_num + 2 # Construct paths and create output directory. (I dislike this naming # format but it's what Pat has chosen. -JLM) output_dir = "r%dc%d" % (row_num, col_num) full_output_dir = os.path.join("frames", output_dir) makedirs_exist_ok(full_output_dir) # Get image object and a few of its useful attributes.
# Query the table for rows where the 'Uid' is in a particular range # ================================================================= queryRows = openTable.getWhereList( "(Uid > 2) & (Uid <= 8)", variables={}, start=0, stop=rowCount, step=0) data = openTable.readCoordinates(queryRows) for col in data.columns: print "Query Results for Column: ", col.name for v in col.values: print " ", v openTable.close() # we're done # In future, to get the table back from Original File # =================================================== orig_table_file = conn.getObject( "OriginalFile", attributes={'name': tablename}) # if name is unique savedTable = conn.c.sf.sharedResources().openTable(orig_table_file._obj) print "Opened table with row-count:", savedTable.getNumberOfRows() # Populate a table on a Plate from a csv file # =========================================== colNames = "Well, Well Type, Concentration\n" csvLines = [ colNames, "A1, Control, 0\n", "A2, Treatment, 5\n", "A3, Treatment, 10\n"] with open('data.csv', 'w') as csvData: csvData.writelines(csvLines) plate = conn.getObject("Plate", plateId)
# Create a connection # ================================================================= conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT) conn.connect() # Configuration # ================================================================= imageId = 401 # Save a plane (raw data) as tiff for analysis # ================================================================= image = conn.getObject("Image", imageId) # first plane of the image pixels = image.getPrimaryPixels() # make a note of min max pixel values for each channel # so that we can scale all the planes from each channel to the same range channelMinMax = [] for c in image.getChannels(): minC = c.getWindowMin() maxC = c.getWindowMax() channelMinMax.append((minC, maxC)) print channelMinMax # Go through each channel (looping through Z and T not shown - go for mid-Z only) # ================================================================= theZ = image.getSizeZ() / 2 theT = 0
# not pass the params argument to getObjects, then all Datasets # in the current group would be returned. print "\nList Datasets:" print "=" * 50 params = omero.sys.ParametersI() params.exp(conn.getUser().getId()) # only show current user's Datasets datasets = conn.getObjects("Dataset", params=params) for dataset in datasets: print_obj(dataset) # Retrieve the images contained in a dataset # ========================================== print "\nDataset:%s" % datasetId print "=" * 50 dataset = conn.getObject("Dataset", datasetId) print "\nImages in Dataset:", dataset.getName() for image in dataset.listChildren(): print_obj(image) # Retrieve an image by ID # ======================= image = conn.getObject("Image", imageId) print "\nImage:%s" % imageId print "=" * 50 print image.getName(), image.getDescription() # Retrieve information about an image. print " X:", image.getSizeX() print " Y:", image.getSizeY() print " Z:", image.getSizeZ()
# create a new Image from the plane generator defined earlier
# (assumes planeGen, sizeZ, sizeC, sizeT and conn exist at this point)
desc = "Image created from a hard-coded arrays"
i = conn.createImageFromNumpySeq(
    planeGen(), "numpy image", sizeZ, sizeC, sizeT, description=desc,
    dataset=None)
print 'Created new Image:%s Name:"%s"' % (i.getId(), i.getName())

# Set the pixel size using units (new in 5.1.0)
# =================================================================
# Lengths are specified by value and a unit enumeration
# Here we set the pixel size X and Y to be 9.8 Angstroms
from omero.model.enums import UnitsLength
# Re-load the image to avoid update conflicts
i = conn.getObject("Image", i.getId())
u = omero.model.LengthI(9.8, UnitsLength.ANGSTROM)
# work on the underlying omero.model object so it can be saved directly
p = i.getPrimaryPixels()._obj
p.setPhysicalSizeX(u)
p.setPhysicalSizeY(u)
conn.getUpdateService().saveObject(p)

# Create an Image from an existing image
# =================================================================
# We are going to create a new image by passing the method a 'generator' of 2D
# planes This will come from an existing image, by taking the average of 2
# channels.
zctList = []
image = conn.getObject('Image', imageId)
sizeZ, sizeC, sizeT = image.getSizeZ(), image.getSizeC(), image.getSizeT()
conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT)
conn.connect()

# Create a new Dataset
# ====================
datasetObj = omero.model.DatasetI()
datasetObj.setName(rstring("New Dataset"))
# saveAndReturnObject gives us back the persisted object with its new id
datasetObj = conn.getUpdateService().saveAndReturnObject(datasetObj)
datasetId = datasetObj.getId().getValue()
print "New dataset, Id:", datasetId

# Link to Project
# ===============
# assumes `projectId` is defined earlier in the script
project = conn.getObject("Project", projectId)
if project is None:
    import sys
    sys.stderr.write("Error: Object does not exist.\n")
    sys.exit(1)
link = omero.model.ProjectDatasetLinkI()
# ProjectI(id, False) creates an unloaded proxy — enough for linking
link.setParent(omero.model.ProjectI(project.getId(), False))
link.setChild(datasetObj)
conn.getUpdateService().saveObject(link)

# Annotate Project with a new 'tag'
# =================================
tagAnn = omero.gateway.TagAnnotationWrapper(conn)
tagAnn.setValue("New Tag")
tagAnn.save()
# ================================================================= conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT) conn.connect() # Configuration # ================================================================= script, imageId = argv # Reitrieve an image by Image ID: # ================================================================= image = conn.getObject("Image", imageId) print "\nImage:%s" % imageId print "=" * 50 print image.getName(), image.getDescription() # Retrieve information about an image. print " X:", image.getSizeX() print " Y:", image.getSizeY() print " Z:", image.getSizeZ() print " C:", image.getSizeC() print " T:", image.getSizeT() # render the first timepoint, mid Z section z = image.getSizeZ() / 2 t = 0 renderedImage = image.renderImage(z, t) #renderedImage.show() # popup (use for debug only) renderedImage.save("test.jpg") # save in the current folder
class Omg(object): """ OMERO gateway that wraps Blitz gateway and CLI, intended for scripting and interactive work. Attributes ---------- conn : Blitz gateway connection """ def __init__(self, conn=None, user=None, passwd=None, server=SERVER, port=PORT, skey=None): """ Requires active Blitz connection OR username plus password or sesskey """ if conn is None and (user is None or (passwd is None and skey is None)): raise ValueError("Bad parameters," + self.__init__.__doc__) if conn is not None: if conn.isConnected(): self.conn = conn else: raise ValueError("Cannot initialize with closed connection!") else: if passwd is not None: self.conn = BlitzGateway(user, passwd, host=server, port=port) self.conn.connect() else: self.conn = BlitzGateway(user, host=server, port=port) self.conn.connect(skey) if self.conn.isConnected(): self._server = self.conn.host self._port = self.conn.port self._user = self.conn.getUser().getName() self._key = self.conn.getSession().getUuid().getValue() print("Connected to {0} (port {1}) as {2}, session key={3}".format( self._server, self._port, self._user, self._key)) else: print("Failed to open connection :-(") def ls(self): """ Print groups, then projects/datasets/images for current group. 
""" print("Groups for {0}:-".format(self.conn.getUser().getName())) for gid, gname in self._ls_groups(): print(" {0} ({1})".format(gname, str(gid))) curr_grp = self.conn.getGroupFromContext() gid, gname = curr_grp.getId(), curr_grp.getName() print("\nData for current group, {0} ({1}):-".format(gname, gid)) for pid, pname in self._ls_projects(): print(" Project: {0} ({1})".format(pname, str(pid))) for did, dname in self._ls_datasets(pid): print(" Dataset: {0} ({1})".format(dname, str(did))) for iid, iname in self._ls_images(did): print(" Image: {0} ({1})".format(iname, str(iid))) # TODO, list orphaned Datasets and Images def _ls_groups(self): """list groups (id, name) this session is a member of""" groups = self.conn.getGroupsMemberOf() return [(group.getId(), group.getName()) for group in groups] def _ls_projects(self): """list projects (id, name) in the current session group""" projs = self.conn.listProjects(self.conn.getUserId()) return [(proj.getId(), proj.getName()) for proj in projs] def _ls_datasets(self, proj_id): """list datasets (id, name) within the project id given""" dsets = self.conn.getObject("Project", proj_id).listChildren() return [(dset.getId(), dset.getName()) for dset in dsets] def _ls_images(self, dset_id): """list images (id, name) within the dataset id given""" imgs = self.conn.getObject("Dataset", dset_id).listChildren() return [(img.getId(), img.getName()) for img in imgs] def chgrp(self, group_id): """ Change group for this session to the group_id given. 
""" self.conn.setGroupForSession(group_id) def get(self, im_id, get_att=True): """ Download the specified image as an OME-TIFF to current directory, with attachments also downloaded to folder: img_path + '_attachments' Return : path to downloaded image """ img = self.conn.getObject("Image", oid=im_id) img_name = self._unique_name(img.getName(), im_id) img_path = os.path.join(os.getcwd(), img_name) img_file = open(str(img_path + ".ome.tiff"), "wb") fsize, blockgen = img.exportOmeTiff(bufsize=65536) for block in blockgen: img_file.write(block) img_file.close() fa_type = omero.model.FileAnnotationI attachments = [ann for ann in img.listAnnotations() if ann.OMERO_TYPE == fa_type] if get_att and len(attachments) > 0: att_dir = img_path + "_attachments" os.mkdir(att_dir) def download_attachment(att, att_dir): """download OMERO file annotation to att_dir""" att_file = open(os.path.join(att_dir, att.getFileName()), "wb") for att_chunk in att.getFileInChunks(): att_file.write(att_chunk) att_file.close() for att in attachments: download_attachment(att, att_dir) return img_path def _unique_name(self, img_name, im_id): """Make unique name combining a file basename & OMERO Image id""" path_and_base, ext = os.path.splitext(img_name) base = os.path.basename(path_and_base) # name in OMERO can has path return "{0}_{1}".format(base, str(im_id)) def dget(self, dataset_id): """ Download an entire OMERO Dataset to the current directory. """ downloads = [] wdir = os.getcwd() dset_name = self.conn.getObject("Dataset", dataset_id).getName() dset_path = os.path.join(wdir, dset_name + "_D" + str(dataset_id)) os.mkdir(dset_path) os.chdir(dset_path) for img_id, img_name in self._ls_images(dataset_id): downloads.append(self.get(img_id)) os.chdir(wdir) return downloads def pget(self, project_id): """ Download an entire OMERO Project to the current directory. 
""" downloads = [] wdir = os.getcwd() proj_name = self.conn.getObject("Project", project_id).getName() proj_path = os.path.join(wdir, proj_name + "_P" + str(project_id)) os.mkdir(proj_path) os.chdir(proj_path) for dset_id, dset_name in self._ls_datasets(project_id): downloads.extend(self.dget(dset_id)) os.chdir(wdir) return downloads def put(self, filename, name=None, dataset=None): """ Import filename using OMERO CLI, optionally with a specified name to a specified dataset (dataset_id). Return : OMERO image Id """ cli = omero.cli.CLI() cli.loadplugins() import_args = ["import"] import_args.extend(["-s", str(self._server)]) import_args.extend(["-k", str(self._key)]) if dataset is not None: import_args.extend(["-d", str(dataset)]) if name is not None: import_args.extend(["-n", str(name)]) clio = "cli.out" clie = "cli.err" import_args.extend(["---errs=" + clie, "---file=" + clio, "--"]) import_args.append(filename) cli.invoke(import_args, strict=True) pix_id = int(open(clio, 'r').read().rstrip()) im_id = self.conn.getQueryService().get("Pixels", pix_id).image.id.val os.remove(clio) os.remove(clie) return im_id def describe(self, im_id, description): """ Append to image description. """ img = self.conn.getObject("Image", oid=im_id) old_description = img.getDescription() or "" img.setDescription(old_description + "\n" + description) img.save() def attach(self, im_id, attachments): """ Attach a list of files to an image. """ img = self.conn.getObject("Image", oid=im_id) for attachment in attachments.split(): fann = self.conn.createFileAnnfromLocalFile(attachment) img.linkAnnotation(fann) img.save() # TODO: ls_tags() and tag() methods? def mkp(self, project_name, description=None): """ Make new OMERO project in current group, returning the new project Id. 
""" # see: omero/lib/python/omeroweb/webclient/controller/container.py proj = omero.model.ProjectI() proj.name = omero.rtypes.rstring(str(project_name)) if description is not None and description != "": proj.description = omero.rtypes.rstring(str(description)) return self._save_and_return_id(proj) def mkd(self, dataset_name, project_id=None, description=None): """ Make new OMERO dataset, returning the new dataset Id. """ dset = omero.model.DatasetI() dset.name = omero.rtypes.rstring(str(dataset_name)) if description is not None and description != "": dset.description = omero.rtypes.rstring(str(description)) if project_id is not None: l_proj_dset = omero.model.ProjectDatasetLinkI() proj = self.conn.getObject("Project", project_id) l_proj_dset.setParent(proj._obj) l_proj_dset.setChild(dset) dset.addProjectDatasetLink(l_proj_dset) return self._save_and_return_id(dset) def _save_and_return_id(self, obj): """Save new omero object and return id assgined to it""" # see: OmeroWebGateway.saveAndReturnId # in: lib/python/omeroweb/webclient/webclient_gateway.py u_s = self.conn.getUpdateService() res = u_s.saveAndReturnObject(obj, self.conn.SERVICE_OPTS) res.unload() return res.id.val def im(self, im_id): """ Return an Im object for the image id specified. 
""" img = self.conn.getObject("Image", im_id) # build pixel np.ndarray nx, ny = img.getSizeX(), img.getSizeY() nz, nt, nc = img.getSizeZ(), img.getSizeT(), img.getSizeC() planes = [(z, c, t) for c in range(nc) for t in range(nt) for z in range(nz)] pix_gen = img.getPrimaryPixels().getPlanes(planes) pix = np.array([i for i in pix_gen]).reshape((nc, nt, nz, ny, nx)) # initialize Im using pix and extracted metadata meta = self._extract_meta(img, im_id) return Im(pix=pix, meta=meta) def _extract_meta(self, img, im_id): """Extract metadata attributes from OMERO Blitz gateway Image""" meta = {} meta['name'] = self._unique_name(img.getName(), im_id) meta['description'] = img.getDescription() def _extract_ch_info(ch): """extract core metadata for for channel, return as dict""" ch_info = {'label': ch.getLabel()} ch_info['ex_wave'] = ch.getExcitationWave() ch_info['em_wave'] = ch.getEmissionWave() ch_info['color'] = ch.getColor().getRGB() return ch_info meta['channels'] = [_extract_ch_info(ch) for ch in img.getChannels()] meta['pixel_size'] = {'x': img.getPixelSizeX(), 'y': img.getPixelSizeY(), 'z': img.getPixelSizeZ(), 'units': "um"} tag_type = omero.model.TagAnnotationI tags = [ann for ann in img.listAnnotations() if ann.OMERO_TYPE == tag_type] meta['tags'] = {tag.getValue() + " (" + str(tag.getId()) + ")": tag.getDescription() for tag in tags} fa_type = omero.model.FileAnnotationI attachments = [ann for ann in img.listAnnotations() if ann.OMERO_TYPE == fa_type] meta['attachments'] = [att.getFileName() + " (" + str(att.getId()) + ")" for att in attachments] user_id = self.conn.getUser().getName() + " (" + \ str(self.conn.getUser().getId()) + ") @" + self.conn.host meta_ext = {} meta_ext['user_id'] = user_id meta['meta_ext'] = meta_ext # TODO: ROIs, display settings? # objective: Image.loadOriginalMetadata()[1][find 'Lens ID Number'][1], return meta def imput(self, im, dataset_id=None): """ Create a new OMERO Image using an Im object, returning new image id. 
""" # see: omero/lib/python/omero/util/script_utils.py # see: omero/lib/python/omeroweb/webclient/webclient_gateway.py # see: https://gist.github.com/will-moore/4141708 if not isinstance(im, Im): raise TypeError("first imput argument must be of type Im") nc, nt, nz, ny, nx = im.shape ch_nums = range(nc) q_s = self.conn.getQueryService() p_s = self.conn.getPixelsService() c_s = self.conn.getContainerService() u_s = self.conn.getUpdateService() pu_s = self.conn.c.sf.createRawPixelsStore() q_ptype = "from PixelsType as p where p.value='{0}'".format( str(im.dtype)) pixelsType = q_s.findByQuery(q_ptype, None) im_id = p_s.createImage(nx, ny, nz, nt, ch_nums, pixelsType, im.name, im.description) img_i = c_s.getImages("Image", [im_id.getValue()], None)[0] img = self.conn.getObject("Image", im_id.getValue()) pix_id = img_i.getPrimaryPixels().getId().getValue() pu_s.setPixelsId(pix_id, True) for c in range(nc): for t in range(nt): for z in range(nz): plane = im.pix[c, t, z, :, :] script_utils.uploadPlaneByRow(pu_s, plane, z, c, t) l_dset_im = omero.model.DatasetImageLinkI() dset = self.conn.getObject("Dataset", dataset_id) l_dset_im.setParent(dset._obj) l_dset_im.setChild(img._obj) self._update_meta(im, im_id) u_s.saveObject(l_dset_im, self.conn.SERVICE_OPTS) return im_id.getValue() def _update_meta(self, im, im_id): """Set OMERO Image metadata using Im metadata"""
# first parameter scripts.String( "Data_Type", optional=False, values=dataTypes, default="Project"), # second parameter scripts.List("IDs", optional=False).ofType(rlong(0)), scripts.String("File_Name", optional=False), ) # we can now create our Blitz Gateway by wrapping the client object conn = BlitzGateway(client_obj=client) # get the parameters IDs = unwrap(client.getInput("IDs")) projectId = IDs[0] fileName = unwrap(client.getInput("File_Name")) project = conn.getObject('Project', projectId) message = "No file downloaded." ratingNs = None for ann in project.listAnnotations(): if isinstance(ann, omero.gateway.FileAnnotationWrapper): name = ann.getFile().getName() print "File ID: %s Name: %s Size: %s" % ( ann.getFile().getId(), name, ann.getFile().getSize()) if fileName == name: file_path = 'downloadFile' f = open(file_path, 'w') print "\nDownloading file ", fileName, "to", file_path, "..." try: for chunk in ann.getFileInChunks(): f.write(chunk) finally:
sys.exit(1) else: print "Login successful.\n" # Set our default group so we can see the data. # (I thought I had to do this, but now I am not sure it's needed. # -JLM 2013/12/04) try: group = next(g for g in conn.listGroups() if g.getName() == GROUP) except StopIteration: print >> sys.stderr, "Error: could not find group '%s'" % GROUP conn.setGroupForSession(group.getId()) # Get plate of interest. # (ID 1552 is "RTK ligands induce differing FOXO3a translocation dynamics") plate = conn.getObject('Plate', 1552) # Get list of lists of well objects from the plate. well_grid = plate.getWellGrid() # Loop over all wells in the plate. for (raw_row_num, row) in enumerate(well_grid): for (raw_col_num, well) in enumerate(row): # Fix up row and column numbers to match Pat's nomenclature. row_num = raw_row_num + 1 col_num = raw_col_num + 2 # Construct paths and create output directory. (I dislike this naming # format but it's what Pat has chosen. -JLM) output_dir = 'r%dc%d' % (row_num, col_num) full_output_dir = os.path.join(frame_dir, output_dir) makedirs_exist_ok(full_output_dir) # Get image object and a few of its useful attributes.
# process the list of args above. Not scrictly necessary, but useful for more complex scripts scriptParams = {} for key in client.getInputKeys(): if client.getInput(key): scriptParams[key] = client.getInput(key, unwrap=True) # unwrap rtypes to String, Integer etc print scriptParams # handy to have inputs in the std-out log # wrap client to use the Blitz Gateway conn = BlitzGateway(client_obj=client) # do the editing... editedImgIds = editDescriptions(conn, scriptParams) # now handle the result, displaying message and returning image if appropriate if editedImgIds is None: message = "Script failed. See 'error' or 'info' for more details" else: if len(editedImgIds) == 1: img = conn.getObject("Image", editedImgIds[0]) # image-wrapper message = "One Image edited: %s" % img.getName() omeroImage = img._obj # omero.model object client.setOutput("Edited Image", robject(omeroImage)) # Insight will display 'View' link to image elif len(editedImgIds) > 1: message = "%s Images edited" % len(editedImgIds) else: message = "No images edited. See 'error' or 'info' for more details" client.setOutput("Message", rstring(message)) # Insight will display the 'Message' parameter finally: client.closeSession()
def run(): """ """ dataTypes = [rstring("Plate")] client = scripts.client( "Manage_Plate_Acquisitions.py", "Add or remove PlateAcquisition(s) in a given Plate", scripts.String("Data_Type", optional=False, grouping="1", description="The data type you want to work with.", values=dataTypes, default="Plate"), scripts.List("IDs", optional=False, grouping="2", description="List of Plate IDs").ofType(rlong(0)), scripts.String("Mode", optional=False, grouping="3", description="Select if you want to add or " "remove PlateAcquisitions", values=[rstring("Add"), rstring("Remove")], default="Add"), version="0.2", authors=["Niko Klaric"], institutions=["Glencoe Software Inc."], contact="*****@*****.**", ) try: scriptParams = {} for key in client.getInputKeys(): if client.getInput(key): scriptParams[key] = client.getInput(key, unwrap=True) connection = BlitzGateway(client_obj=client) updateService = connection.getUpdateService() queryService = connection.getQueryService() processedMessages = [] for plateId in scriptParams["IDs"]: plateObj = connection.getObject("Plate", plateId) if plateObj is None: client.setOutput( "Message", rstring("ERROR: No Plate with ID %s" % plateId)) return if scriptParams["Mode"] == "Add": plateAcquisitionObj = PlateAcquisitionI() plateAcquisitionObj.setPlate(PlateI(plateObj.getId(), False)) wellGrid = plateObj.getWellGrid() for axis in wellGrid: for wellObj in axis: wellSampleList = wellObj.copyWellSamples() plateAcquisitionObj.addAllWellSampleSet(wellSampleList) plateAcquisitionObj = updateService.saveAndReturnObject( plateAcquisitionObj) plateAcquisitionId = plateAcquisitionObj.getId()._val processedMessages.append( "Linked new PlateAcquisition with ID %d" " to Plate with ID %d." 
% (plateAcquisitionId, plateId)) else: params = ParametersI() params.addId(plateId) queryString = """ FROM PlateAcquisition AS pa LEFT JOIN FETCH pa.wellSample LEFT OUTER JOIN FETCH pa.annotationLinks WHERE pa.plate.id = :id """ plateAcquisitionList = queryService.findAllByQuery( queryString, params, connection.SERVICE_OPTS) if plateAcquisitionList: updateList = [] for plate_acquisition in plateAcquisitionList: for well_sample in plate_acquisition.copyWellSample(): well_sample.setPlateAcquisition(None) updateList.append(well_sample) updateService.saveArray(updateList) plate_acquisition.clearWellSample() plate_acquisition.clearAnnotationLinks() plate_acquisition = updateService.saveAndReturnObject( plate_acquisition) updateService.deleteObject(plate_acquisition) processedMessages.append( "%d PlateAcquisition(s) removed from Plate with ID %d." % (len(plateAcquisitionList), plateId)) client.setOutput("Message", rstring("No errors. %s" % " ".join(processedMessages))) finally: client.closeSession()
print scriptParams # handy to have inputs in the std-out log # wrap client to use the Blitz Gateway conn = BlitzGateway(client_obj=client) # do the editing... editedImgIds = editDescriptions(conn, scriptParams) # now handle the result, displaying message and returning image if # appropriate if editedImgIds is None: message = "Script failed. See 'error' or 'info' for more details" else: if len(editedImgIds) == 1: # image-wrapper img = conn.getObject("Image", editedImgIds[0]) message = "One Image edited: %s" % img.getName() # omero.model object omeroImage = img._obj # Insight will display 'View' link to image client.setOutput("Edited Image", robject(omeroImage)) elif len(editedImgIds) > 1: message = "%s Images edited" % len(editedImgIds) else: message = ("No images edited. See 'error' or 'info' for more" " details") # Insight will display the 'Message' parameter client.setOutput("Message", rstring(message)) finally: client.closeSession()
import omero.util.figureUtil as figUtil # Create a connection # ================================================================= conn = BlitzGateway("username", "password", host="localhost") conn.connect() # Configuration # ================================================================= imageId = 67 # The Image we want to Analyse # ================================================================= image = conn.getObject('Image', imageId) # To keep things simple, we'll work with a single Ellipse per T # ================================================================= def getEllipses(conn, imageId): """ Returns the a dict of tIndex: {'cx':cx, 'cy':cy, 'rx':rx, 'ry':ry, 'z':z} NB: Assume only 1 ellipse per time point @param conn: BlitzGateway connection @param imageId: Image ID """ ellipses = {} result = conn.getRoiService().findByImage(imageId, None, conn.SERVICE_OPTS)
# Create a connection # ================================================================= conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT) conn.connect() # Configuration # ================================================================= imageId = 101 # Get the 'Fileset' for an Image # ================================================================= # A Fileset is a collection of the original files imported to # create an image or set of images in OMERO. image = conn.getObject("Image", imageId) fileset = image.getFileset() # will be None for pre-FS images fsId = fileset.getId() # List all images that are in this fileset for fsImage in fileset.copyImages(): print fsImage.getId(), fsImage.getName() # List original imported files for origFile in fileset.listFiles(): name = origFile.getName() path = origFile.getPath() print path, name # Get Original Imported Files directly from the image # ================================================================= # this will include pre-FS data IF images were archived on import
from omero.gateway import BlitzGateway
import omero
from omero.rtypes import *
from Connect_To_OMERO import USERNAME, PASSWORD, HOST, PORT

# create a connection
# NOTE(review): host is hard-coded here, ignoring the HOST constant
# imported above — presumably a leftover from testing; confirm intent
conn = BlitzGateway(USERNAME, PASSWORD,
                    host="gretzky.openmicroscopy.org.uk", port=PORT)
conn.connect()

from random import random
from numpy import array
import math

datasetId = 2651  # hard-coded example dataset id
dataset = conn.getObject("Dataset", datasetId)

# first create our table...
# columns we want are: imageId, roiId, shapeId, theZ, theT, lineLength, shapetext.
columns = [
    omero.grid.LongColumn("imageId", "", []),
    # NOTE(review): "roidId" looks like a typo for "roiId", but it is the
    # persisted column name — renaming it would break readers of
    # existing tables, so it is left as-is here
    omero.grid.RoiColumn("roidId", "", []),
    omero.grid.LongColumn("shapeId", "", []),
    omero.grid.LongColumn("theZ", "", []),
    omero.grid.LongColumn("theT", "", []),
    omero.grid.DoubleColumn("lineLength", "", []),
    omero.grid.StringColumn("shapeText", "", 64, []),
]
# create and initialize the table; random() suffix keeps the name unique
table = conn.c.sf.sharedResources().newTable(1, "LineLengths%s" % str(random()))
table.initialize(columns)
def test_chgrp_old_container(self, dataset, credentials):
    """
    Tests Admin moving user's Dataset to their Private group and
    linking it to an existing Project there.
    Bug from https://github.com/openmicroscopy/openmicroscopy/pull/3420
    """
    django_client = self.get_django_client(credentials)
    # user creates project in their target group
    project = ProjectI()
    projectName = "chgrp-target-%s" % self.client.getSessionId()
    project.name = rstring(projectName)
    # omero.group context forces creation in group2 rather than the
    # session's current group
    ctx = {"omero.group": str(self.group2.id.val)}
    project = self.sf.getUpdateService().saveAndReturnObject(project, ctx)
    request_url = reverse('chgrp')
    data = {
        "group_id": self.group2.id.val,
        "Dataset": dataset.id.val,
        "target_id": "project-%s" % project.id.val,
    }
    data = _csrf_post_response_json(django_client, request_url, data)
    # the chgrp response describes the client-side tree update: the
    # dataset is removed from its old place, nothing is orphaned
    expected = {"update": {"childless": {"project": [],
                                         "orphaned": False,
                                         "dataset": []},
                           "remove": {"project": [],
                                      "plate": [],
                                      "screen": [],
                                      "image": [],
                                      "dataset": [dataset.id.val]}}}
    assert data == expected
    activities_url = reverse('activities_json')
    data = _get_response_json(django_client, activities_url, {})
    # Keep polling activities until no jobs in progress
    while data['inprogress'] > 0:
        time.sleep(0.5)
        data = _get_response_json(django_client, activities_url, {})
    # individual activities/jobs are returned as dicts within json data
    for k, o in data.items():
        if hasattr(o, 'values'):  # a dict
            if 'report' in o:
                print o['report']
            assert o['status'] == 'finished'
            assert o['job_name'] == 'Change group'
            assert o['to_group_id'] == self.group2.id.val
    # Dataset should now be in new group, contained in Project
    conn = BlitzGateway(client_obj=self.client)
    userId = conn.getUserId()
    # group '-1' means cross-group querying, so we can see group2's data
    conn.SERVICE_OPTS.setOmeroGroup('-1')
    d = conn.getObject("Dataset", dataset.id.val)
    assert d is not None
    assert d.getDetails().group.id.val == self.group2.id.val
    p = d.getParent()
    assert p is not None
    assert p.getName() == projectName
    # Project owner should be current user
    assert p.getDetails().owner.id.val == userId
    assert p.getId() == project.id.val
class TableConnection(object): """ A basic client-side wrapper for OMERO.tables which handles opening and closing tables. """ def __init__(self, user = None, passwd = None, host = 'localhost', client = None, tableName = None, tableId = None): """ Create a new table handler, either by specifying user and passwd or by providing a client object (for scripts) @param user Username @param passwd Password @param host The server hostname @param client Client object with an active session @param tableName The name of the table file @param tableId The OriginalFile ID of the table file """ if not client: client = omero.client(host) sess = client.createSession(user, passwd) client.enableKeepAlive(60) else: sess = client.getSession() self.conn = BlitzGateway(client_obj = client) self.res = sess.sharedResources() if (not self.res.areTablesEnabled()): raise TableConnectionError('OMERO.tables not enabled') repos = self.res.repositories() self.rid = repos.descriptions[0].id.val self.tableName = tableName self.tableId = tableId self.table = None def __enter__(self): print 'Entering Connection' return self def __exit__(self, type, value, traceback): print 'Exiting Connection' self.close() def close(self): print 'Closing Connection' try: self.closeTable() finally: self.conn._closeSession() def openTable(self, tableId = None, tableName = None): """ Opens an existing table by ID or name. If there are multiple tables with the same name this throws an error (should really use an annotation to keep track of this). 
If tableId is supplied it will be used in preference to tableName @param tableName The name of the table file @param tableId The OriginalFile ID of the table file @return handle to the table """ if not tableId and not tableName: tableId = self.tableId tableName = self.tableName if not tableId: if not tableName: tableName = self.tableName attrs = {'name': tableName} ofiles = list( self.conn.getObjects("OriginalFile", attributes = attrs)) if len(ofiles) > 1: raise TableConnectionError( 'Multiple tables with name:%s found' % tableName) if not ofiles: raise TableConnectionError( 'No table found with name:%s' % tableName) ofile = ofiles[0] else: attrs = {'id': long(tableId)} if tableName: attrs['name'] = tableName ofile = self.conn.getObject("OriginalFile", attributes = attrs) if not ofile: raise TableConnectionError('No table found with name:%s id:%s' % (tableName, tableId)) if self.tableId == ofile.getId(): print 'Using existing connection to table name:%s id:%d' % \ (tableName, tableId) else: self.closeTable() self.table = self.res.openTable(ofile._obj) self.tableId = ofile.getId() print 'Opened table name:%s id:%d' % (tableName, self.tableId) try: print '\t%d rows %d columns' % \ (self.table.getNumberOfRows(), len(self.table.getHeaders())) except omero.ApiUsageException: pass self.tableId = tableId return self.table def deleteAllTables(self): """ Delete all tables with self.tableName Will fail if there are any annotation links """ ofiles = self.conn.getObjects("OriginalFile", \ attributes = {'name': self.tableName}) ids = [f.getId() for f in ofiles] print 'Deleting ids:%s' % ids self.conn.deleteObjects('OriginalFile', ids) def dumpTable(self, table): """ Print out the table """ headers = table.getHeaders() print ', '.join([t.name for t in headers]) nrows = table.getNumberOfRows() #data = table.readCoordinates(xrange(table.getNumberOfRows)) for r in xrange(nrows): data = table.read(range(len(headers)), r, r + 1) print ', '.join(['%.2f' % c.values[0] for c in 
data.columns]) def closeTable(self): """ Close the table if open, and set table and tableId to None """ try: if self.table: self.table.close() finally: self.table = None self.tableId = None def newTable(self, schema): """ Create a new uninitialised table @param schema the table description @return A handle to the table """ self.closeTable() self.table = self.res.newTable(self.rid, self.tableName) ofile = self.table.getOriginalFile() self.tableId = ofile.getId().getValue() try: self.table.initialize(schema) print "Initialised '%s' (%d)" % (self.tableName, self.tableId) except Exception as e: print "Failed to create table: %s" % e try: self.table.delete except Exception as ed: print "Failed to delete table: %s" % ed self.table = None self.tableId = None raise e return self.table def chunkedRead(self, colNumbers, start, stop, chunk): """ Split a call to table.read(), into multiple chunks to limit the number of rows returned in one go. @param colNumbers A list of columns indices to be read @param start The first row to be read @param stop The last + 1 row to be read @param chunk The maximum number of rows to read in each call @return a data object, note lastModified will be set to the timestamp the first chunked call """ p = start q = min(start + chunk, stop) data = self.table.read(colNumbers, p, q) p, q = q, min(q + chunk, stop) while p < stop: data2 = self.table.read(colNumbers, p, q) data.rowNumbers.extend(data2.rowNumbers) for (c, c2) in izip(data.columns, data2.columns): c.values.extend(c2.values) p, q = q, min(q + chunk, stop) return data
plate = conn.getObject("Plate", plateId) print "\nNumber of fields:", plate.getNumberOfFields() print "\nGrid size:", plate.getGridSize() print "\nWells in Plate:", plate.getName() for well in plate.listChildren(): index = well.countWellSample() print " Well: ", well.row, well.column, " Fields:", index for index in xrange(0, index): print " Image: ", \ well.getImage(index).getName(),\ well.getImage(index).getId() """ # Retrieve Wells and Images within a Screen: # ================================================================= if screenId >= 0: screen = conn.getObject("Screen", screenId) for plate in screen.listChildren(): index = plate.countPlateSample() for index in xrange(0, index): print "ScreenID:", screenId, "Name:", screen.getName() # Close connection: # ================================================================= # When you're done, close the session to free up server resources. conn._closeSession()