def image_rois(self, user1, shapes):
    """Create and save an Image with attached ROIs.

    Builds three ROIs: one holding all but the last shape, one with no
    shapes at all, and one holding the final shape but no image link.

    :param user1: tuple whose first element owns the session/update service
    :param shapes: list of Shape objects; must contain at least one
    :return: (image, rois) with rois sorted by ascending id
    """
    image = ImageI()
    image.name = rstring('Image for ROIs')
    image = get_update_service(user1).saveAndReturnObject(image)
    # ROI with all but one shapes
    rois = []
    roi = RoiI()
    for shape in shapes[:-1]:
        roi.addShape(shape)
    roi.setImage(image)
    rois.append(roi)
    # roi without shapes
    roi = RoiI()
    roi.setImage(image)
    rois.append(roi)
    # roi without image
    roi = RoiI()
    roi.addShape(shapes[-1])
    rois.append(roi)
    rois = get_update_service(user1).saveAndReturnArray(rois)
    # Sort by id so callers can rely on a deterministic order
    rois.sort(key=lambda x: x.id.val)
    return image, rois
def setUp(self):
    """Build an InCell plate analysis context over a 16x24 (384-well)
    plate backed by a single companion result file."""
    AbstractPlateAnalysisCtx.DEFAULT_ORIGINAL_FILE_PROVIDER = \
        FromFileOriginalFileProvider
    original_files = list()
    # Create our container images and an original file image map
    images = list()
    n_images = 0
    # NOTE(review): n_images is never incremented, so every Well,
    # WellSample and Image below is constructed with id 0 -- confirm
    # this is intended (distinct ids may have been lost in editing).
    for row in range(16):
        for column in range(24):
            well = WellI(n_images, True)
            well.column = rint(column)
            well.row = rint(row)
            well_sample = WellSampleI(n_images, True)
            well_sample.well = well
            image = ImageI(n_images, True)
            image.addWellSample(well_sample)
            images.append(image)
    original_file_image_map = dict()
    # Our required original file format
    format = rstring('Companion/InCell')
    # Create original file representing the result file
    o = OriginalFileI(1, True)
    o.name = rstring(self.RESULT_FILE)
    o.path = rstring(os.path.join(self.ROOT, self.RESULT_FILE))
    o.mimetype = format
    original_files.append(o)  # [1] = o
    # Map original-file id 1 to the last image created above
    original_file_image_map[1] = image
    sf = TestingServiceFactory()
    self.analysis_ctx = InCellPlateAnalysisCtx(images, original_files,
                                               original_file_image_map,
                                               1, sf)
def setUp(self):
    """Build an InCell plate analysis context over a 16x24 (384-well)
    plate backed by a single companion result file.

    Fix: replaced Python-2-only long literals (``1L``) with plain ``1``
    -- the value is identical on Python 2 and this also matches the
    sibling setUp elsewhere in the file which already uses ``1``.
    """
    AbstractPlateAnalysisCtx.DEFAULT_ORIGINAL_FILE_PROVIDER = \
        FromFileOriginalFileProvider
    original_files = list()
    # Create our container images and an original file image map
    images = list()
    n_images = 0
    # NOTE(review): n_images is never incremented, so every Well,
    # WellSample and Image below is constructed with id 0 -- confirm.
    for row in range(16):
        for column in range(24):
            well = WellI(n_images, True)
            well.column = rint(column)
            well.row = rint(row)
            well_sample = WellSampleI(n_images, True)
            well_sample.well = well
            image = ImageI(n_images, True)
            image.addWellSample(well_sample)
            images.append(image)
    original_file_image_map = dict()
    # Our required original file format
    format = rstring('Companion/InCell')
    # Create original file representing the result file
    o = OriginalFileI(1, True)
    o.name = rstring(self.RESULT_FILE)
    o.path = rstring(os.path.join(self.ROOT, self.RESULT_FILE))
    o.mimetype = format
    original_files.append(o)  # [1] = o
    # Map original-file id 1 to the last image created above
    original_file_image_map[1] = image
    sf = TestingServiceFactory()
    self.analysis_ctx = InCellPlateAnalysisCtx(
        images, original_files, original_file_image_map, 1, sf)
def image_no_acquisition_date(request, gatewaywrapper):
    """Create and return an Image whose acquisition date is the epoch (0).

    Fix: ``rtime(0L)`` used a Python-2-only long literal (SyntaxError on
    Python 3); ``rtime(0)`` is equivalent on both interpreters.
    """
    gatewaywrapper.loginAsAuthor()
    gw = gatewaywrapper.gateway
    update_service = gw.getUpdateService()
    image = ImageI()
    image.name = rstring('an image')
    image.acquisitionDate = rtime(0)
    # saveAndReturnIds returns a list; unpack the single new id
    image_id, = update_service.saveAndReturnIds([image])
    return gw.getObject('Image', image_id)
def image(request, gatewaywrapper):
    """Create and return an Image with a fixed acquisition date.

    Fix: dropped the redundant ``int()`` wrapper around an integer
    literal -- ``1429578900000`` is already an int.
    """
    gatewaywrapper.loginAsAuthor()
    gw = gatewaywrapper.gateway
    update_service = gw.getUpdateService()
    image = ImageI()
    image.name = rstring('an image')
    # 2015-04-21 01:15:00 (milliseconds since epoch)
    image.acquisitionDate = rtime(1429578900000)
    image_id, = update_service.saveAndReturnIds([image])
    return gw.getObject('Image', image_id)
def wrapped_image():
    """Return an ImageWrapper around a stub Image (id 1, owner id 1).

    Fix: replaced Python-2-only ``1L`` long literals with plain ``1``
    (identical value, valid on Python 3).
    """
    image = ImageI()
    image.id = rlong(1)
    image.description = rstring('description')
    image.name = rstring('name')
    image.acquisitionDate = rtime(1000)  # In milliseconds
    image.details.owner = ExperimenterI(1, False)
    creation_event = EventI()
    creation_event.time = rtime(2000)  # In milliseconds
    image.details.creationEvent = creation_event
    return ImageWrapper(conn=MockConnection(), obj=image)
def testListOrphans(self, orphaned, load_pixels, gatewaywrapper):
    """Exercise listOrphans() and getObjects() with the 'orphaned' option.

    Creates 5 orphaned images plus one image inside a Dataset, then
    checks pagination, pixel loading and orphan filtering; all created
    objects are deleted again in the finally block.
    """
    # We login as 'User', since they have no other orphaned images
    gatewaywrapper.loginAsUser()
    conn = gatewaywrapper.gateway
    eid = conn.getUserId()
    # Create 5 orphaned images
    iids = []
    for i in range(0, 5):
        img = gatewaywrapper.createTestImage(imageName=str(uuid.uuid1()))
        iids.append(img.id)
    # Create image in Dataset, to check this isn't found
    dataset = DatasetI()
    dataset.name = wrap('testListOrphans')
    image = ImageI()
    image.name = wrap('testListOrphans')
    dataset.linkImage(image)
    dataset = conn.getUpdateService().saveAndReturnObject(dataset)
    try:
        # Only test listOrphans() if orphaned
        if orphaned:
            # Pagination
            params = omero.sys.ParametersI()
            params.page(1, 3)
            findImagesInPage = list(
                conn.listOrphans("Image", eid=eid, params=params))
            assert len(findImagesInPage) == 3
            # No pagination (all orphans)
            findImages = list(
                conn.listOrphans("Image", loadPixels=load_pixels))
            assert len(findImages) == 5
            for p in findImages:
                assert p._obj.pixelsLoaded == load_pixels
        # Test getObjects() with 'orphaned' option
        opts = {'orphaned': orphaned, 'load_pixels': load_pixels}
        getImages = list(conn.getObjects("Image", opts=opts))
        assert orphaned == (len(getImages) == 5)
        for p in getImages:
            assert p._obj.pixelsLoaded == load_pixels
        # Simply check this doesn't fail See https://github.com/
        # openmicroscopy/openmicroscopy/pull/4950#issuecomment-264142956
        dsIds = [d.id for d in conn.listOrphans("Dataset")]
        assert dataset.id.val in dsIds
    finally:
        # Cleanup - Delete what we created
        conn.deleteObjects('Image', iids, deleteAnns=True, wait=True)
        conn.deleteObjects('Dataset', [dataset.id.val],
                           deleteChildren=True, wait=True)
def create_image(image_index):
    """Return an unsaved Image (uuid-based name) with a 1x1x1x1x1 Pixels.

    Fix: replaced Python-2-only ``1L`` long literals with plain ``1``
    (identical value, valid on Python 3).
    """
    image = ImageI()
    image.name = rstring("%s_%d" % (uuid(), image_index))
    image.acquisitionDate = rtime(0)
    pixels = PixelsI()
    pixels.sha1 = rstring("")
    pixels.sizeX = rint(1)
    pixels.sizeY = rint(1)
    pixels.sizeZ = rint(1)
    pixels.sizeC = rint(1)
    pixels.sizeT = rint(1)
    pixels.dimensionOrder = DimensionOrderI(1, False)  # XYZCT
    pixels.pixelsType = PixelsTypeI(1, False)  # bit
    image.addPixels(pixels)
    return image
def project_datasets(self, user1):
    """Return Project with Datasets and an orphaned Dataset.

    The Project links five Datasets; Dataset N contains N images
    (so Dataset0 is empty). One extra Dataset is saved with no
    Project link.
    """
    project = ProjectI()
    project.name = rstring('Project')
    # Dataset N gets N freshly-created 5x5 test images
    for ds_index in range(5):
        child_ds = DatasetI()
        child_ds.name = rstring('Dataset%s' % ds_index)
        for img_index in range(ds_index):
            created = self.create_test_image(
                size_x=5, size_y=5,
                session=user1[0].getSession(),
                name="Image%s" % img_index)
            child_ds.linkImage(ImageI(created.id.val, False))
        project.linkDataset(child_ds)
    # A Dataset that belongs to no Project
    orphan = DatasetI()
    orphan.name = rstring('Dataset')
    project = get_update_service(user1).saveAndReturnObject(project)
    orphan = get_update_service(user1).saveAndReturnObject(orphan)
    return project, orphan
def save_rois(conn, image, rois):
    """Attach every ROI in *rois* to *image* and persist them one by one."""
    log.info('Creating %d ROIs for image %s' % (len(rois), image.name))
    update_service = conn.getUpdateService()
    for roi in rois:
        # Link via an unloaded Image proxy to avoid re-saving the image
        roi.setImage(ImageI(image.id, False))
        saved_roi = update_service.saveAndReturnObject(roi)
        assert saved_roi
def create_map_annotation(ctx, annotation, target_id, target_type="Project"): """Creates a map annotation, uploads it to Omero, and links it to target object""" # populate new MapAnnotationData object with dictionary result = ArrayList() for item in annotation: # add key:value pairs; both need to be strings result.add(NamedValue(str(item), str(annotation[item]))) data = MapAnnotationData() data.setContent(result) data.setDescription("Demo Example") #Use the following namespace if you want the annotation to be editable in the webclient and insight data.setNameSpace(MapAnnotationData.NS_CLIENT_CREATED) dm = gateway.getFacility(DataManagerFacility) target_obj = None # use the appropriate target DataObject and attach the MapAnnotationData object to it if target_type == "Project": target_obj = ProjectData(ProjectI(target_id, False)) elif target_type == "Dataset": target_obj = DatasetData(DatasetI(target_id, False)) elif target_type == "Image": target_obj = ImageData(ImageI(target_id, False)) result = dm.attachAnnotation(ctx, data, target_obj) return result
def screen_plates(self, user1): """Return Screen with Plates and an orphaned Plate.""" # Create and name all the objects screen = ScreenI() screen.name = rstring('screen') for i in range(5): plate1 = PlateI() plate1.name = rstring('Plate%s' % i) screen.linkPlate(plate1) # Create single orphaned Plate plate = PlateI() plate.name = rstring('plate') screen = get_update_service(user1).saveAndReturnObject(screen) plate = get_update_service(user1).saveAndReturnObject(plate) # Add well to first plate plates = screen.linkedPlateList() plates.sort(cmp_name_insensitive) plate_id = plates[0].id.val well = WellI() well.column = rint(0) well.row = rint(0) well.plate = PlateI(plate_id, False) image = self.create_test_image(size_x=5, size_y=5, session=user1[0].getSession()) ws = WellSampleI() ws.image = ImageI(image.id, False) ws.well = well well.addWellSample(ws) well = get_update_service(user1).saveAndReturnObject(well) return screen, plate
def upload_csv_to_omero(ctx, file, tablename, target_id,
                        target_type="Project"):
    """Upload the CSV file and attach it to the specified object.

    Streams *file* in 10000-byte blocks into a new OriginalFile, then
    wraps it in a FileAnnotation and attaches it to the Project/Dataset/
    Image identified by *target_id*. Python 2 only (print statements,
    long()); relies on a module-level 'gateway' being in scope.
    """
    print file
    print file.name
    svc = gateway.getFacility(DataManagerFacility)
    file_size = os.path.getsize(file.name)
    original_file = OriginalFileI()
    original_file.setName(rstring(tablename))
    original_file.setPath(rstring(file.name))
    original_file.setSize(rlong(file_size))
    checksum_algorithm = ChecksumAlgorithmI()
    checksum_algorithm.setValue(rstring(ChecksumAlgorithmSHA1160.value))
    original_file.setHasher(checksum_algorithm)
    original_file.setMimetype(rstring("text/csv"))
    original_file = svc.saveAndReturnObject(ctx, original_file)
    store = gateway.getRawFileService(ctx)
    # Open file and read stream
    # NOTE(review): setFileId is called again inside the try below;
    # this first call appears redundant -- confirm before removing.
    store.setFileId(original_file.getId().getValue())
    print original_file.getId().getValue()
    try:
        store.setFileId(original_file.getId().getValue())
        with open(file.name, 'rb') as stream:
            buf = 10000
            for pos in range(0, long(file_size), buf):
                block = None
                # Last block may be shorter than the buffer size
                if file_size - pos < buf:
                    block_size = file_size - pos
                else:
                    block_size = buf
                stream.seek(pos)
                block = stream.read(block_size)
                store.write(block, pos, block_size)
        original_file = store.save()
    finally:
        store.close()
    # create the file annotation
    namespace = "training.demo"
    fa = FileAnnotationI()
    fa.setFile(original_file)
    fa.setNs(rstring(namespace))
    if target_type == "Project":
        target_obj = ProjectData(ProjectI(target_id, False))
    elif target_type == "Dataset":
        target_obj = DatasetData(DatasetI(target_id, False))
    elif target_type == "Image":
        target_obj = ImageData(ImageI(target_id, False))
    svc.attachAnnotation(ctx, FileAnnotationData(fa), target_obj)
def run(password, admin_name, target, tag, host, port):
    """For each of 50 training users, link the admin-owned tag *tag* to
    images (named in the module-level ``images_to_tag``) inside the
    user's dataset named *target*, skipping images already tagged.

    Each user gets their own BlitzGateway connection, closed in finally.
    """
    for i in range(1, 51):
        # NOTE(review): the username format string appears redacted
        # ("******" has no %-placeholder) -- restore before running.
        username = "******" % i
        print(username)
        conn = BlitzGateway(username, password, host=host, port=port)
        try:
            conn.connect()
            updateService = conn.getUpdateService()
            ds = conn.getObject("Dataset", attributes={'name': target},
                                opts={'owner': conn.getUserId()})
            if ds is None:
                print("No dataset with name %s found" % target)
                continue
            # Find the tag owned by the admin user
            params = omero.sys.ParametersI()
            params.addString('username', admin_name)
            query = "from TagAnnotation where textvalue='%s' \
                AND details.owner.omeName=:username" % tag
            query_service = conn.getQueryService()
            tags = query_service.findAllByQuery(query, params,
                                                conn.SERVICE_OPTS)
            if len(tags) == 0:
                print("No tag with name %s found" % tag)
                continue
            tag_id = tags[0].id.getValue()
            print(tag_id)
            links = []
            for image in ds.listChildren():
                name = image.getName()
                if name in images_to_tag:
                    # Check first that the image is not tagged
                    params = omero.sys.ParametersI()
                    params.addLong('parent', image.id)
                    params.addLong('child', tag_id)
                    query = "select link from ImageAnnotationLink as link \
                        where link.parent.id=:parent \
                        AND link.child.id=:child"
                    values = query_service.findAllByQuery(
                        query, params, conn.SERVICE_OPTS)
                    if len(values) == 0:
                        link = ImageAnnotationLinkI()
                        link.parent = ImageI(image.id, False)
                        link.child = TagAnnotationI(tag_id, False)
                        links.append(link)
                    else:
                        print("Tag %s already linked to %s" % (tag, name))
            if len(links) > 0:
                updateService.saveArray(links)
        except Exception as exc:
            print("Error when tagging the images: %s" % str(exc))
        finally:
            conn.close()
def testChgrpImage(self): """ Tests chgrp for a dummny image object (no Pixels) """ # One user in two groups client, exp = self.new_client_and_user() grp = self.new_group([exp]) gid = grp.id.val client.sf.getAdminService().getEventContext() # Reset session update = client.sf.getUpdateService() query = client.sf.getQueryService() admin = client.sf.getAdminService() first_gid = admin.getEventContext().groupId # Create a dataset in the 'first group' ds = self.make_dataset(name="testChgrpImage_target", client=client) ds_id = ds.id.val # Change our context to new group and create image admin.setDefaultGroup(exp, ExperimenterGroupI(gid, False)) self.set_context(client, gid) update = client.sf.getUpdateService() # do we need to get this again? img = self.new_image() img = update.saveAndReturnObject(img) # Move image to new group chgrp = Chgrp2(targetObjects={'Image': [img.id.val]}, groupId=first_gid) # Link to Save link = DatasetImageLinkI() link.child = ImageI(img.id.val, False) link.parent = DatasetI(ds_id, False) save = Save() save.obj = link requests = [chgrp, save] # we're going to chgrp THEN save DIlink # Change our context to original group... admin.setDefaultGroup(exp, ExperimenterGroupI(first_gid, False)) self.set_context(client, first_gid) # We have to be in destination group for link Save to work self.doSubmit(requests, client) # ...check image img = client.sf.getQueryService().get("Image", img.id.val) assert img.details.group.id.val == first_gid # check Dataset query = "select link from DatasetImageLink link\ where link.child.id=%s" % img.id.val l = client.sf.getQueryService().findByQuery(query, None) assert l is not None, "New DatasetImageLink on image not found" assert l.details.group.id.val == first_gid,\ "Link Created in same group as Image target"
def link_tags(conn, datasetname, image_tag_links, image_ratings):
    """For each of 50 training users, link tag and rating annotations to
    the images of the user's dataset named *datasetname*.

    :param conn: admin BlitzGateway connection
    :param image_tag_links: dict mapping image name -> list of tag ids
    :param image_ratings: dict mapping image name -> rating value
    """
    for i in range(1, 51):
        # NOTE(review): the username format string appears redacted
        # ("******" has no %-placeholder) -- restore before running.
        username = "******" % i
        print(username)
        exp = conn.getAdminService().lookupExperimenter(username)
        exp_id = exp.id.val
        dataset = conn.getObject("Dataset",
                                 attributes={'name': datasetname},
                                 opts={'owner': exp_id})
        if dataset is None:
            print("Dataset not found")
            continue
        links = []
        for image in dataset.listChildren():
            name = image.name
            if name in image_tag_links:
                for tag_id in image_tag_links[name]:
                    link = ImageAnnotationLinkI()
                    link.parent = ImageI(image.id, False)
                    link.child = TagAnnotationI(tag_id, False)
                    # Links are created on behalf of the dataset owner
                    link.details.owner = ExperimenterI(exp_id, False)
                    links.append(link)
            if name in image_ratings:
                link = ImageAnnotationLinkI()
                link.parent = ImageI(image.id, False)
                r = LongAnnotationI()
                r.ns = rstring(RATING_NS)
                r.longValue = rlong(image_ratings[name])
                link.child = r
                link.details.owner = ExperimenterI(exp_id, False)
                links.append(link)
        print('links', len(links))
        # Save in the group the dataset belongs to
        group_id = dataset.getDetails().getGroup().id
        conn.SERVICE_OPTS.setOmeroGroup(group_id)
        try:
            conn.getUpdateService().saveArray(links, conn.SERVICE_OPTS)
        except ValidationException:
            print("Failed to link for %s" % username)
def plate(request, gatewaywrapper):
    """Creates a Plate with 3 wells of 2 well-samples each and returns
    the wrapped, saved object."""
    gatewaywrapper.loginAsAuthor()
    gw = gatewaywrapper.gateway
    update_service = gw.getUpdateService()
    plate = PlateI()
    plate.name = rstring(uuid())
    for well_index in range(3):
        well = WellI()
        # Square/cube of the index gives distinct, non-linear positions
        well.row = rint(well_index**2)
        well.column = rint(well_index**3)
        for well_sample_index in range(2):
            well_sample = WellSampleI()
            image = ImageI()
            image.name = rstring('%s_%d' % (uuid(), well_sample_index))
            image.acquisitionDate = rtime(0)
            well_sample.image = image
            well.addWellSample(well_sample)
        plate.addWell(well)
    plate_id, = update_service.saveAndReturnIds([plate])
    return gw.getObject('Plate', plate_id)
def create_plate_wells(self, user1, rows, cols, plateacquisitions=1):
    """Return Plate with Wells.

    Creates a rows x cols grid of Wells. Wells in the first column get
    3 WellSamples per PlateAcquisition (or 3 samples with no acquisition
    when plateacquisitions is 0), each backed by a 5x5 test image.
    """
    updateService = get_update_service(user1)
    plate = PlateI()
    plate.name = rstring('plate')
    plate = updateService.saveAndReturnObject(plate)
    # PlateAcquisitions for plate
    plate_acqs = []
    for p in range(plateacquisitions):
        plate_acq = PlateAcquisitionI()
        plate_acq.name = rstring('plateacquisition_%s' % p)
        plate_acq.description = rstring('plateacquisition_description')
        plate_acq.maximumFieldCount = rint(3)
        plate_acq.startTime = rtime(1)
        plate_acq.endTime = rtime(2)
        plate_acq.plate = PlateI(plate.id.val, False)
        plate_acq = updateService.saveAndReturnObject(plate_acq)
        plate_acqs.append(plate_acq)
    # Create Wells for plate
    ref_frame = UnitsLength.REFERENCEFRAME
    for row in range(rows):
        for col in range(cols):
            # create Well
            well = WellI()
            well.column = rint(col)
            well.row = rint(row)
            well.plate = PlateI(plate.id.val, False)
            # Only wells in first Column have well-samples etc.
            if col == 0:
                # Have 3 images/well-samples per plateacquisition
                # (if no plateacquisitions, create 3 well-samples without)
                for p in range(max(1, plateacquisitions)):
                    for i in range(3):
                        image = self.create_test_image(
                            size_x=5, size_y=5,
                            session=user1[0].getSession())
                        ws = WellSampleI()
                        ws.image = ImageI(image.id, False)
                        ws.well = well
                        # Spread samples along X; Y encodes the index
                        ws.posX = LengthI(i * 10, ref_frame)
                        ws.posY = LengthI(i, ref_frame)
                        if p < len(plate_acqs):
                            ws.setPlateAcquisition(
                                PlateAcquisitionI(plate_acqs[p].id.val,
                                                  False))
                        well.addWellSample(ws)
            updateService.saveObject(well)
    return plate
def duplicateMIF(self, orig_img):
    """
    Use copyAndResizeImage to create a "synthetic" image (one without
    a fileset).

    :param orig_img: the source Image whose first channel is copied
    :return: (new_img, new_pix) as unloaded ImageI / PixelsI proxies
    """
    # 16x16, 1 Z-section, 1 timepoint copy of channel 0
    new_img = self.pixels.copyAndResizeImage(
        orig_img.id.val, rint(16), rint(16), rint(1), rint(1),
        [0], None, True).val
    # Look up the Pixels id belonging to the newly created image
    pix_id = unwrap(self.query.projection(
        "select p.id from Image i join i.pixels p where i.id = :id",
        ParametersI().addId(new_img)))[0][0]
    new_img = ImageI(new_img, False)
    new_pix = PixelsI(pix_id, False)
    return new_img, new_pix
def add_images_key_values(gateway, key_values, image_ids, group_id,
                          description=None):
    """Adds some key:value pairs to a list of images.

    Accepts either a single image id or an iterable of ids.
    """
    map_data = _dict_to_map_annotation(key_values, description)
    data_manager, ctx = _data_manager_generator(gateway, group_id)
    # Normalise a single id into a one-element list
    targets = image_ids if hasattr(image_ids, '__iter__') else [image_ids]
    for image_id in targets:
        annotation_link = ImageAnnotationLinkI()
        annotation_link.setChild(map_data.asAnnotation())
        annotation_link.setParent(ImageI(image_id, False))
        data_manager.saveAndReturnObject(ctx, annotation_link)
def add_image_tag(gateway, tag_text, image_id, description=None):
    """Adds a tag to an image and returns the saved link."""
    data_manager = gateway.getFacility(DataManagerFacility)
    ctx = SecurityContext(gateway.getLoggedInUser().getGroupId())
    # Build the tag annotation
    tag_data = TagAnnotationData(tag_text)
    if description:
        tag_data.setTagDescription(description)
    # Link tag and image, then persist the link
    tag_link = ImageAnnotationLinkI()
    tag_link.setChild(tag_data.asAnnotation())
    tag_link.setParent(ImageI(image_id, False))
    return data_manager.saveAndReturnObject(ctx, tag_link)
def images_to_dataset(conn, params):
    """Link every image referenced by the given OMERO.figure files to a
    Dataset.

    :param conn: BlitzGateway connection
    :param params: dict with 'Figure_IDs' (FileAnnotation ids) and 'IDs'
        (first entry is the target Dataset id)
    :return: (message, dataset) tuple for script output
    """
    figure_ids = params["Figure_IDs"]
    dataset_id = params["IDs"][0]
    dataset = conn.getObject("Dataset", dataset_id)
    if dataset is None:
        return "Dataset %s not found" % dataset_id, dataset
    gid = dataset.getDetails().group.id.val
    print("Dataset: %s, Group: %s" % (dataset.name, gid))
    update = conn.getUpdateService()
    # Look up figure files across all groups
    conn.SERVICE_OPTS.setOmeroGroup(-1)
    if len(figure_ids) == 0:
        return "Enter Figure IDs separated with a comma: '1,2'", dataset
    image_ids = []
    for figure_id in figure_ids:
        file_ann = conn.getObject("FileAnnotation", figure_id)
        if file_ann is None:
            print("Figure File-Annotation %s not found" % figure_id)
            # BUG FIX: skip missing figures -- previously execution fell
            # through and dereferenced file_ann (None), raising
            # AttributeError.
            continue
        figure_json = b"".join(list(file_ann.getFileInChunks()))
        figure_json = figure_json.decode('utf8')
        json_data = json.loads(figure_json)
        image_ids.extend([p["imageId"] for p in json_data.get("panels")])
    # De-duplicate image ids collected across figures
    image_ids = list(set(image_ids))
    if len(image_ids) == 0:
        return "No Images found. Check Info log", dataset
    print("Image IDs: %s" % image_ids)
    # Switch back to the dataset's group before saving links
    conn.SERVICE_OPTS.setOmeroGroup(gid)
    added_count = 0
    for image_id in image_ids:
        link = DatasetImageLinkI()
        link.parent = DatasetI(dataset_id, False)
        link.child = ImageI(image_id, False)
        try:
            update.saveObject(link, conn.SERVICE_OPTS)
            added_count += 1
        except Exception:
            print("Image %s not linked to Dataset. "
                  "Link exists or permissions failed" % image_id)
    return "Added %s images to Dataset" % added_count, dataset
def chgrpImagesToTargetDataset(self, imgCount): """ Helper method to test chgrp of image(s) to target Dataset """ # One user in two groups client, user = self.new_client_and_user(perms=PRIVATE) admin = client.sf.getAdminService() target_grp = self.new_group([user], perms=PRIVATE) target_gid = target_grp.id.val images = self.importMIF(imgCount, client=client) ds = self.createDSInGroup(target_gid, client=client) # each chgrp includes a 'save' link to target dataset saves = [] ids = [] for i in images: ids.append(i.id.val) link = DatasetImageLinkI() link.child = ImageI(i.id.val, False) link.parent = DatasetI(ds.id.val, False) save = Save() save.obj = link saves.append(save) chgrp = Chgrp2(targetObjects={"Image": ids}, groupId=target_gid) requests = [chgrp] requests.extend(saves) self.doSubmit(requests, client, omero_group=target_gid) # Check Images moved to correct group queryService = client.sf.getQueryService() ctx = {'omero.group': '-1'} # query across groups for i in images: image = queryService.get('Image', i.id.val, ctx) img_gid = image.details.group.id.val assert target_gid == img_gid,\ "Image should be in group: %s, NOT %s" % (target_gid, img_gid) # Check Dataset has images linked dsImgs = client.sf.getContainerService().getImages( 'Dataset', [ds.id.val], None, ctx) assert len(dsImgs) == len(images),\ "All Images should be in target Dataset" previous_gid = admin.getEventContext().groupId return (ds, images, client, user, previous_gid, target_gid)
def dataset_images(self, user1):
    """Return Dataset with Images and an orphaned Image.

    The Dataset links five 125x125 test images; one further test image
    is created but left unlinked.
    """
    dataset = DatasetI()
    dataset.name = rstring('Dataset')
    # Populate the dataset with five freshly-created test images
    for index in range(5):
        created = self.create_test_image(size_x=125, size_y=125,
                                         session=user1[0].getSession(),
                                         name="Image%s" % index)
        dataset.linkImage(ImageI(created.id.val, False))
    # One image saved outside any dataset
    image = self.create_test_image(size_x=125, size_y=125,
                                   session=user1[0].getSession())
    dataset = get_update_service(user1).saveAndReturnObject(dataset)
    return dataset, image
def _parse_neo_roi(self, columns):
    """Parses out ROI from OmeroTables columns for 'NEO' datasets.

    Builds one circular EllipseI ROI per row (cx from column 4, cy from
    column 3, diameter from column 6), links each to its image and to
    the saved file annotation, and hands batches of ROI_UPDATE_LIMIT to
    the thread pool for saving. Finally the saved ROI ids are appended
    to columns[self.ROI_COL] in batch order.

    Fix: ``batches.keys()`` followed by ``.sort()`` fails on Python 3
    (dict views have no sort method); ``sorted()`` behaves identically
    on both Python 2 and 3.
    """
    log.debug("Parsing %s NEO ROIs..." % (len(columns[0].values)))
    image_ids = columns[self.IMAGE_COL].values
    rois = list()
    # Save our file annotation to the database so we can use an unloaded
    # annotation for the saveAndReturnIds that will be triggered below.
    self.file_annotation = \
        self.update_service.saveAndReturnObject(self.file_annotation)
    unloaded_file_annotation = \
        FileAnnotationI(self.file_annotation.id.val, False)
    batch_no = 1
    batches = dict()
    for i, image_id in enumerate(image_ids):
        unloaded_image = ImageI(image_id, False)
        roi = RoiI()
        shape = EllipseI()
        values = columns[6].values
        diameter = rdouble(float(values[i]))
        shape.theZ = rint(0)
        shape.theT = rint(0)
        values = columns[4].values
        shape.cx = rdouble(float(values[i]))
        values = columns[3].values
        shape.cy = rdouble(float(values[i]))
        # Equal radii: the ellipse is a circle of the given diameter
        shape.rx = diameter
        shape.ry = diameter
        roi.addShape(shape)
        roi.image = unloaded_image
        roi.linkAnnotation(unloaded_file_annotation)
        rois.append(roi)
        if len(rois) == self.ROI_UPDATE_LIMIT:
            self.thread_pool.add_task(
                self.update_rois, rois, batches, batch_no)
            rois = list()
            batch_no += 1
    # Flush the final, possibly partial, batch
    self.thread_pool.add_task(self.update_rois, rois, batches, batch_no)
    self.thread_pool.wait_completion()
    for k in sorted(batches.keys()):
        columns[self.ROI_COL].values += batches[k]
def add_image_table(gateway, table_parameters, table_data, table_name,
                    image_ids):
    """Appends a table to a project.

    :param gateway: a gateway to the omero server
    :param table_parameters: a list of 2 or 3 element-tuples containing:
        1- a string with column name
        2- a string with data type. Allowed values are:
           'String', 'Long', 'Double'
        3- optional, a string containing the description of the column
    :param table_data: a list of lists containing the data
    :param table_name: a string containing the table name. Must be unique
    :param image_ids: the id (of list of ids) of the target image(s)
    """
    # Accept a single id as well as an iterable of ids
    if not hasattr(image_ids, '__iter__'):
        image_ids = [image_ids]
    for image in image_ids:
        target = ImageI(image, False)
        try:
            _add_table(gateway, table_parameters, table_data, table_name,
                       target)
        except Exception as e:
            # Best-effort: report and continue with the remaining images
            print(e)
def create_points(conn, df, image):
    """Create one point ROI on *image* for every matching row of *df*.

    Rows are selected where df['Source Name'] equals the image name.

    :param conn: BlitzGateway connection
    :param df: DataFrame with 'Source Name' plus the columns listed below
    :param image: the target image wrapper
    :return: DataFrame of saved ROI ids alongside the source columns
    """
    columns = [
        "Sequence", "Tagged Protein", "Sequence is unique",
        "Location_X", "Location_Y"
    ]
    df2 = pandas.DataFrame(columns=(['Roi'] + columns))
    # Boolean mask selecting only rows belonging to this image
    index = df['Source Name'] == image.getName()
    for (s, tp, u, x, y) in zip(*map(lambda x: df[index][x], columns)):
        p = PointI()
        p.x = rdouble(float(x))
        p.y = rdouble(float(y))
        # Label the point with the tagged-protein name
        p.setTextValue(rstring(tp))
        roi = RoiI()
        roi.addShape(p)
        roi.setName(rstring(tp))
        roi.setImage(ImageI(image.getId(), False))
        roi = conn.getUpdateService().saveAndReturnObject(roi)
        df2.loc[len(df2)] = (roi.getId().getValue(), s, tp, u, x, y)
    return df2
def create_image(image_index):
    """Return an unsaved Image (uuid-based name) with a 1x1x1x1x1 Pixels.

    Fix: replaced Python-2-only ``1L`` long literals with plain ``1``
    (identical value, valid on Python 3).
    """
    image = ImageI()
    image.name = rstring('%s_%d' % (uuid(), image_index))
    image.acquisitionDate = rtime(0)
    pixels = PixelsI()
    pixels.sha1 = rstring('')
    pixels.sizeX = rint(1)
    pixels.sizeY = rint(1)
    pixels.sizeZ = rint(1)
    pixels.sizeC = rint(1)
    pixels.sizeT = rint(1)
    pixels.dimensionOrder = DimensionOrderI(1, False)  # XYZCT
    pixels.pixelsType = PixelsTypeI(1, False)  # bit
    image.addPixels(pixels)
    return image
def create_image(image_id, with_pixels=False):
    """Return a stub PNG-format Image with the given id.

    When *with_pixels* is True, a fully-populated Pixels object with two
    channels (green GFP/488 and blue DAPI/405) is attached.

    Fix: replaced Python-2-only long literals (``1L``, ``2L``, ``8L``)
    with plain ints -- identical values, valid on Python 3.
    """
    image_format = FormatI(1)
    image_format.value = rstring("PNG")
    image = ImageI()
    image.id = rlong(image_id)
    image.acquisitionDate = rtime(1)
    image.archived = rbool(False)
    image.description = rstring("image_description_%d" % image_id)
    image.name = rstring("image_name_%d" % image_id)
    image.partial = rbool(False)
    image.series = rint(0)
    image.format = image_format
    if not with_pixels:
        return image
    dimension_order = DimensionOrderI(1)
    dimension_order.value = rstring("XYZCT")
    pixels_type = PixelsTypeI(1)
    # NOTE(review): plain str here while every other .value assignment
    # uses rstring() -- confirm whether rstring("bit") was intended.
    pixels_type.value = "bit"
    pixels = PixelsI(1)
    pixels.methodology = rstring("methodology")
    pixels.physicalSizeX = LengthI(1.0, UnitsLength.MICROMETER)
    pixels.physicalSizeY = LengthI(2.0, UnitsLength.MICROMETER)
    pixels.physicalSizeZ = LengthI(3.0, UnitsLength.MICROMETER)
    pixels.sha1 = rstring("61ee8b5601a84d5154387578466c8998848ba089")
    pixels.significantBits = rint(16)
    pixels.sizeX = rint(1)
    pixels.sizeY = rint(2)
    pixels.sizeZ = rint(3)
    pixels.sizeC = rint(4)
    pixels.sizeT = rint(5)
    pixels.timeIncrement = TimeI(1.0, UnitsTime.MILLISECOND)
    pixels.waveIncrement = rdouble(2.0)
    pixels.waveStart = rint(1)
    pixels.dimensionOrder = dimension_order
    pixels.pixelsType = pixels_type
    image.addPixels(pixels)
    # Shared acquisition metadata for both channels
    contrast_method = ContrastMethodI(8)
    contrast_method.value = rstring("Fluorescence")
    illumination = IlluminationI(1)
    illumination.value = rstring("Transmitted")
    acquisition_mode = AcquisitionModeI(1)
    acquisition_mode.value = rstring("WideField")
    photometric_interpretation = PhotometricInterpretationI(1)
    photometric_interpretation.value = rstring("RGB")
    # Channel 1: green (GFP/488)
    channel_1 = ChannelI(1)
    channel_1.alpha = rint(255)
    channel_1.blue = rint(0)
    channel_1.green = rint(255)
    channel_1.red = rint(0)
    channel_1.lookupTable = rstring("rainbow")
    logical_channel_1 = LogicalChannelI(1)
    logical_channel_1.emissionWave = LengthI(509.0, UnitsLength.NANOMETER)
    logical_channel_1.excitationWave = LengthI(488.0,
                                              UnitsLength.NANOMETER)
    logical_channel_1.fluor = rstring("GFP")
    logical_channel_1.name = rstring("GFP/488")
    logical_channel_1.ndFilter = rdouble(1.0)
    logical_channel_1.pinHoleSize = LengthI(1.0, UnitsLength.NANOMETER)
    logical_channel_1.pockelCellSetting = rint(0)
    logical_channel_1.samplesPerPixel = rint(2)
    logical_channel_1.contrastMethod = contrast_method
    logical_channel_1.illumination = illumination
    logical_channel_1.mode = acquisition_mode
    logical_channel_1.photometricInterpretation = photometric_interpretation
    channel_1.logicalChannel = logical_channel_1
    # Channel 2: blue (DAPI/405)
    channel_2 = ChannelI(2)
    channel_2.alpha = rint(255)
    channel_2.blue = rint(255)
    channel_2.green = rint(0)
    channel_2.red = rint(0)
    channel_2.lookupTable = rstring("rainbow")
    logical_channel_2 = LogicalChannelI(2)
    logical_channel_2.emissionWave = LengthI(470.0, UnitsLength.NANOMETER)
    logical_channel_2.excitationWave = LengthI(405.0,
                                               UnitsLength.NANOMETER)
    logical_channel_2.fluor = rstring("DAPI")
    logical_channel_2.name = rstring("DAPI/405")
    logical_channel_2.ndFilter = rdouble(1.0)
    logical_channel_2.pinHoleSize = LengthI(2.0, UnitsLength.NANOMETER)
    logical_channel_2.pockelCellSetting = rint(0)
    logical_channel_2.samplesPerPixel = rint(2)
    logical_channel_2.contrastMethod = contrast_method
    logical_channel_2.illumination = illumination
    logical_channel_2.mode = acquisition_mode
    logical_channel_2.photometricInterpretation = photometric_interpretation
    channel_2.logicalChannel = logical_channel_2
    pixels.addChannel(channel_1)
    pixels.addChannel(channel_2)
    return image
def buildFigure(self):
    """
    The main building of the figure happens here, independently of
    format. We set up directories as needed, call createFigure() to
    create the PDF or TIFF then iterate through figure pages, adding
    panels for each page. Then we add an info page and create a zip of
    everything if needed. Finally the created file or zip is uploaded
    to OMERO and attached as a file annotation to all the images in
    the figure. (Python 2 code: uses print statements.)
    """
    # test to see if we've got multiple pages
    page_count = 'page_count' in self.figure_json and self.figure_json[
        'page_count'] or 1
    self.page_count = int(page_count)
    paper_spacing = 'paper_spacing' in self.figure_json and self.figure_json[
        'paper_spacing'] or 50
    page_col_count = 'page_col_count' in self.figure_json and self.figure_json[
        'page_col_count'] or 1
    # Create a zip if we have multiple TIFF pages or we're exporting Images
    export_option = self.scriptParams['Export_Option']
    createZip = False
    if self.exportImages:
        createZip = True
    if (self.page_count > 1) and (export_option.startswith("TIFF")):
        createZip = True
    # somewhere to put PDF and images
    self.zip_folder_name = None
    if createZip:
        self.zip_folder_name = "figure"
        curr_dir = os.getcwd()
        zipDir = os.path.join(curr_dir, self.zip_folder_name)
        os.mkdir(zipDir)
        if self.exportImages:
            for d in (ORIGINAL_DIR, RESAMPLED_DIR, FINAL_DIR):
                imgDir = os.path.join(zipDir, d)
                os.mkdir(imgDir)
            self.addReadMeFile()
    # Create the figure file(s)
    self.createFigure()
    panels_json = self.figure_json['panels']
    imageIds = set()
    groupId = None
    # We get our group from the first image
    id1 = panels_json[0]['imageId']
    groupId = self.conn.getObject("Image", id1).getDetails().group.id.val
    # For each page, add panels...
    col = 0
    row = 0
    for p in range(self.page_count):
        print "\n------------------------- PAGE ", p + 1, \
            "--------------------------"
        # Page origin in the overall grid layout
        px = col * (self.pageWidth + paper_spacing)
        py = row * (self.pageHeight + paper_spacing)
        page = {'x': px, 'y': py}
        # if export_option == "TIFF":
        #     add_panels_to_tiff(conn, tiffFigure, panels_json,
        #                        imageIds, page)
        # elif export_option == "PDF":
        self.add_panels_to_page(panels_json, imageIds, page)
        # complete page and save
        self.savePage()
        col = col + 1
        if col >= page_col_count:
            col = 0
            row = row + 1
    # Add thumbnails and links page
    self.addInfoPage(panels_json)
    # Saves the completed figure file
    self.saveFigure()
    # PDF will get created in this group
    if groupId is None:
        groupId = self.conn.getEventContext().groupId
    self.conn.SERVICE_OPTS.setOmeroGroup(groupId)
    outputFile = self.figureFileName
    ns = self.ns
    mimetype = self.mimetype
    if self.zip_folder_name is not None:
        zipName = self.getZipName()
        # Recursively zip everything up
        compress(zipName, self.zip_folder_name)
        outputFile = zipName
        ns = "omero.web.figure.zip"
        mimetype = "application/zip"
    fileAnn = self.conn.createFileAnnfromLocalFile(outputFile,
                                                   mimetype=mimetype,
                                                   ns=ns)
    # Attach the figure file to every image that appeared in it
    links = []
    for iid in list(imageIds):
        print "linking to", iid
        link = ImageAnnotationLinkI()
        link.parent = ImageI(iid, False)
        link.child = fileAnn._obj
        links.append(link)
    if len(links) > 0:
        # Don't want to fail at this point due to strange permissions combo
        try:
            links = self.conn.getUpdateService().saveAndReturnArray(
                links, self.conn.SERVICE_OPTS)
        except:
            print "Failed to attach figure: %s to images %s" % (fileAnn,
                                                                imageIds)
    return fileAnn
def process_images(conn, script_params):
    """
    Measure velocities from Line/Polyline ROIs on kymograph images.

    Fetches the images named by script_params, keeps only those with at
    least one Line or Polyline ROI, computes per-segment x/t slopes (and
    speeds in um/sec when both pixel sizes are available), writes the
    results to a CSV file and attaches it to the annotatable images.

    :param conn: BlitzGateway connection used for all OMERO services
    :param script_params: script parameter dict passed to
        script_utils.get_objects
    :return: (file_anns, message) — list of created FileAnnotations (or
        None on early exit) and a user-facing status message
    """
    file_anns = []
    message = ""

    # Get the images
    images, log_message = script_utils.get_objects(conn, script_params)
    message += log_message
    if not images:
        return None, message

    # Check for line and polyline ROIs and filter images list
    images = [
        image for image in images
        if image.getROICount(["Polyline", "Line"]) > 0
    ]
    if not images:
        message += "No ROI containing line or polyline was found."
        return None, message

    csv_data = []

    for image in images:
        # Kymographs are expected to be single-timepoint; skip time-lapses.
        if image.getSizeT() > 1:
            message += "%s ID: %s appears to be a time-lapse Image," \
                " not a kymograph." % (image.getName(), image.getId())
            continue

        roi_service = conn.getRoiService()
        result = roi_service.findByImage(image.getId(), None)

        # In a kymograph, Y is time and X is distance, so um/sec is
        # (um per X pixel) / (sec per Y pixel).
        secs_per_pixel_y = image.getPixelSizeY()
        microns_per_pixel_x = image.getPixelSizeX()
        if secs_per_pixel_y and microns_per_pixel_x:
            microns_per_sec = microns_per_pixel_x / secs_per_pixel_y
        else:
            # Missing calibration: slopes are still reported in pixels.
            microns_per_sec = None

        # for each line or polyline, create a row in csv table: y(t), x,
        # dy(dt), dx, x/t (line), x/t (average)
        col_names = "\nt_start (pixels), x_start (pixels), t_end (pixels)," \
            " x_end (pixels), dt (pixels), dx (pixels), x/t, speed(um/sec)," \
            "avg x/t, avg speed(um/sec)"
        table_data = ""
        for roi in result.rois:
            for s in roi.copyShapes():
                if s is None:
                    continue  # seems possible in some situations
                if type(s) == omero.model.LineI:
                    table_data += "\nLine ID: %s" % s.getId().getValue()
                    x1 = s.getX1().getValue()
                    x2 = s.getX2().getValue()
                    y1 = s.getY1().getValue()
                    y2 = s.getY2().getValue()
                    dx = abs(x1 - x2)
                    dy = abs(y1 - y2)
                    # NOTE(review): a perfectly horizontal line (dy == 0)
                    # raises ZeroDivisionError here — TODO confirm intended.
                    dx_per_y = float(dx) / dy
                    speed = ""
                    if microns_per_sec:
                        speed = dx_per_y * microns_per_sec
                    table_data += "\n"
                    table_data += ",".join([
                        str(x)
                        for x in (y1, x1, y2, x2, dy, dx, dx_per_y, speed)
                    ])
                elif type(s) == omero.model.PolylineI:
                    table_data += "\nPolyline ID: %s" % s.getId().getValue()
                    v = s.getPoints().getValue()
                    points = roi_utils.points_string_to_xy_list(v)
                    # One CSV row per consecutive segment; the "avg" columns
                    # use the slope from the polyline's first point.
                    x_start, y_start = points[0]
                    for i in range(1, len(points)):
                        x1, y1 = points[i - 1]
                        x2, y2 = points[i]
                        dx = abs(x1 - x2)
                        dy = abs(y1 - y2)
                        # NOTE(review): dy == 0 or y2 == y_start raises
                        # ZeroDivisionError — TODO confirm intended.
                        dx_per_y = float(dx) / dy
                        av_x_per_y = abs(float(x2 - x_start) /
                                         (y2 - y_start))
                        speed = ""
                        avg_speed = ""
                        if microns_per_sec:
                            speed = dx_per_y * microns_per_sec
                            avg_speed = av_x_per_y * microns_per_sec
                        table_data += "\n"
                        table_data += ",".join([
                            str(x)
                            for x in (y1, x1, y2, x2, dy, dx, dx_per_y,
                                      speed, av_x_per_y, avg_speed)
                        ])

        # write table data to csv...
        if len(table_data) > 0:
            table_string = "Image ID:, %s," % image.getId()
            table_string += "Name:, %s" % image.getName()
            table_string += "\nsecsPerPixelY: %s" % secs_per_pixel_y
            table_string += '\nmicronsPerPixelX: %s' % microns_per_pixel_x
            table_string += "\n"
            table_string += col_names
            table_string += table_data
            csv_data.append(table_string)

    iids = [str(i.getId()) for i in images]
    # Only link the CSV to images the current user may annotate.
    to_link_csv = [i.getId() for i in images if i.canAnnotate()]
    csv_file_name = 'kymograph_velocities_%s.csv' % "-".join(iids)
    with open(csv_file_name, 'w') as csv_file:
        csv_file.write("\n \n".join(csv_data))

    file_ann = conn.createFileAnnfromLocalFile(csv_file_name,
                                               mimetype="text/csv")
    fa_message = "Created Line Plot csv (Excel) file"

    links = []
    if len(to_link_csv) == 0:
        fa_message += " but could not attach to images."
    for iid in to_link_csv:
        link = ImageAnnotationLinkI()
        link.parent = ImageI(iid, False)
        link.child = file_ann._obj
        links.append(link)
    if len(links) > 0:
        links = conn.getUpdateService().saveAndReturnArray(links)

    if file_ann:
        file_anns.append(file_ann)

    if not file_anns:
        fa_message = "No Analysis files created. See 'Info' or 'Error'" \
            " for more details"
    elif len(file_anns) > 1:
        fa_message = "Created %s csv (Excel) files" % len(file_anns)
    message += fa_message
    return file_anns, message
def __init__(self, image=None): DataObject.__init__(self) if(image is None): self.setValue(ImageI()) else: self.setValue(image)
def get_image_tables_file_names(gateway, image_id): """Returns a list with all the table names available in the project""" target = ImageI(image_id, False) tables = _get_available_tables(gateway, target) return [t.getFileName() for t in tables]