def create_dataset(connection, dataset_name, dataset_description=None, parent_project=None):
    """Create and persist a new dataset, optionally linking it to a project.

    :param connection: service factory providing ``getUpdateService()``
    :param dataset_name: name for the new dataset
    :param dataset_description: optional description text
    :param parent_project: optional ``omero.model.Project`` to link the
        new dataset under
    :return: the saved ``omero.model.DatasetI`` (with server-assigned id)
    """
    new_dataset = model.DatasetI()
    new_dataset.setName(rtypes.rstring(dataset_name))
    if dataset_description:
        new_dataset.setDescription(rtypes.rstring(dataset_description))
    new_dataset = connection.getUpdateService().saveAndReturnObject(new_dataset)
    if parent_project:
        # delegate to the shared helper so project/dataset linking
        # lives in exactly one place
        link_dataset_to_project(connection, new_dataset, parent_project)
    return new_dataset
def link_image_to_dataset(connection, image, dataset):
    """Attach an existing image to an existing dataset.

    :param connection: service factory providing ``getUpdateService()``
    :param image: the ``omero.model.Image`` to attach
    :param dataset: the ``omero.model.Dataset`` to attach it to
    """
    ds_image_link = model.DatasetImageLinkI()
    # unloaded proxies: only the link row itself is persisted
    ds_image_link.setParent(model.DatasetI(dataset.getId(), False))
    ds_image_link.setChild(model.ImageI(image.getId(), False))
    connection.getUpdateService().saveObject(ds_image_link)
def link_dataset_to_project(connection, dataset, project):
    """Attach an existing dataset to an existing project.

    :param connection: service factory providing ``getUpdateService()``
    :param dataset: the ``omero.model.Dataset`` to attach
    :param project: the ``omero.model.Project`` to attach it to
    """
    proj_ds_link = model.ProjectDatasetLinkI()
    # linking to a loaded project might raise an exception, so use an
    # unloaded proxy built from the project's id
    proj_ds_link.setParent(model.ProjectI(project.getId(), False))
    proj_ds_link.setChild(model.DatasetI(dataset.getId(), False))
    connection.getUpdateService().saveObject(proj_ds_link)
def create_dataset(connection, name, description=None, parent_project=None):
    """Create a dataset through the Blitz gateway wrapper and save it.

    :param connection: Blitz gateway connection
    :param name: name for the new dataset
    :param description: optional description text
    :param parent_project: optional project to link the dataset under
    :return: the saved ``gw.DatasetWrapper``
    """
    dataset_wrapper = gw.DatasetWrapper(connection, model.DatasetI())
    dataset_wrapper.setName(name)
    if description:
        dataset_wrapper.setDescription(description)
    dataset_wrapper.save()
    # optionally place the new dataset under a parent project
    if parent_project:
        link_dataset_to_project(connection, dataset_wrapper, parent_project)
    return dataset_wrapper
def create_dataset(self, dataset_name, dataset_description=None):
    """Create and persist a dataset via the stateless update service.

    :param dataset_name: name for the new dataset
    :param dataset_description: optional description text
    :return: the saved ``omero.model.DatasetI`` (with server-assigned id)
    """
    dataset_obj = model.DatasetI()
    dataset_obj.setName(rtypes.rstring(dataset_name))
    if dataset_description is not None:
        # wrap in an rstring like the name above; the OMERO model setter
        # expects an RString, not a plain Python str
        dataset_obj.setDescription(rtypes.rstring(dataset_description))
    dataset_obj = self.SESSION.getUpdateService().saveAndReturnObject(
        dataset_obj)
    return dataset_obj
def create_table(self, dataset_id, dataframe, table_name):
    """Store a pandas DataFrame as an OMERO.tables HDF5 file and attach it
    to a dataset as a file annotation.

    :param dataset_id: id of the dataset the table is attached to
    :param dataframe: pandas DataFrame holding the tabular data
    :param table_name: base name for the server-side ``<table_name>.h5`` file
    :return: the saved ``DatasetAnnotationLink`` joining dataset and file
        annotation (note: the link, not the table object itself)
    """
    resources = self.SESSION.sharedResources()
    repository_id = resources.repositories().descriptions[0].getId(
    ).getValue()

    # Build one OMERO.grid column per dataframe column: an empty "init"
    # column defines the schema, a populated twin carries the data.
    init_cols = []
    table_data = []
    for index, col in enumerate(dataframe.columns):
        values = list(dataframe.iloc[:, index].values)
        kind = dataframe[col].dtype.kind
        if kind in ('i', 'u'):
            init_cols.append(grid.LongColumn(col, '', []))
            table_data.append(
                grid.LongColumn(col, '', [int(v) for v in values]))
        elif kind == 'f':
            init_cols.append(grid.DoubleColumn(col, '', []))
            table_data.append(
                grid.DoubleColumn(col, '', [float(v) for v in values]))
        elif kind == 'b':
            init_cols.append(grid.BoolColumn(col, '', []))
            table_data.append(
                grid.BoolColumn(col, '', [bool(v) for v in values]))
        else:
            # object/string column; StringColumn needs a fixed width
            max_len = dataframe[col].str.len().max()
            # guard against NaN (empty/all-null column) or zero width
            max_len = int(max_len) if max_len == max_len and max_len > 0 else 1
            init_cols.append(grid.StringColumn(col, '', max_len, []))
            table_data.append(grid.StringColumn(col, '', max_len, values))

    table = resources.newTable(repository_id,
                               ''.join(["/", table_name, ".h5"]))
    try:
        table.initialize(init_cols)
        table.addData(table_data)
        # fetch the backing file before closing; this worked after the
        # table.close() statement was invoked in 5.4.10
        orig_file = table.getOriginalFile()
    finally:
        # always release the server-side table handle, even on error
        table.close()
    orig_file_id = orig_file.id.val

    # attach the stored file to the dataset via a file annotation,
    # using unloaded OriginalFileI / DatasetI / FileAnnotationI proxies
    file_ann = model.FileAnnotationI()
    file_ann.setFile(model.OriginalFileI(orig_file_id, False))
    file_ann = self.SESSION.getUpdateService().saveAndReturnObject(
        file_ann)
    link = model.DatasetAnnotationLinkI()
    link.setParent(model.DatasetI(dataset_id, False))
    link.setChild(model.FileAnnotationI(file_ann.getId().getValue(), False))
    saved_link = self.SESSION.getUpdateService().saveAndReturnObject(link)
    return saved_link
def update_object_tag(client, objects_list, tag_id):
    """Link an existing tag annotation to each dataset/image in a list.

    :param client: omero client whose session provides the update service
    :param objects_list: iterable of ``omero.model.DatasetI`` /
        ``omero.model.ImageI`` objects
    :param tag_id: id of an existing TagAnnotation to attach
    """
    update_service = client.getSession().getUpdateService()
    for om_object in objects_list:
        link = None
        if isinstance(om_object, model.DatasetI):
            link = model.DatasetAnnotationLinkI()
            link.setParent(model.DatasetI(om_object.getId(), False))
            link.setChild(model.TagAnnotationI(tag_id, False))
        elif isinstance(om_object, model.ImageI):
            link = model.ImageAnnotationLinkI()
            link.setParent(model.ImageI(om_object.getId(), False))
            link.setChild(model.TagAnnotationI(tag_id, False))
        if link is None:
            # unsupported object type: skip rather than crash on save(None)
            continue
        update_service.saveAndReturnObject(link)
def add_tags(self, tag_values, object_type, object_id):
    """Create tag annotations and link them to the given object.

    Each entry in ``tag_values`` may hold several comma-separated tags;
    empty fragments are ignored.

    :param tag_values: iterable of tag strings
    :param object_type: ``str(OMERODataType.project|dataset|image)``
    :param object_id: id of the object to tag
    """
    # use the stateless update service instead of blitz
    update_service = self.SESSION.getUpdateService()
    for tag_value in tag_values:
        # tags shouldn't contain commas? split into individual tags
        split_tag_values = [x.strip() for x in tag_value.split(',')]
        for split_tag_value in split_tag_values:
            # ignore empty tag strings (already stripped above)
            if not split_tag_value:
                continue
            # TODO: add check here to see if a tag with this value
            # already exists and link it if so
            new_tag_anno = model.TagAnnotationI()
            # TODO: determine what description the tag annotation should
            # have; e.g. date, strain
            new_tag_anno.setTextValue(rtypes.rstring(split_tag_value))
            tag_anno = update_service.saveAndReturnObject(new_tag_anno)
            # sanity-check that the target object exists before linking
            self.retrieve_objects(object_type, [int(object_id)])
            if object_type == str(OMERODataType.project):
                link = model.ProjectAnnotationLinkI()
                link.parent = model.ProjectI(object_id, False)
            elif object_type == str(OMERODataType.dataset):
                link = model.DatasetAnnotationLinkI()
                link.parent = model.DatasetI(object_id, False)
            elif object_type == str(OMERODataType.image):
                link = model.ImageAnnotationLinkI()
                link.parent = model.ImageI(object_id, False)
            else:
                # unknown object type: tag was saved but cannot be linked
                continue
            link.child = model.TagAnnotationI(tag_anno.id, False)
            update_service.saveAndReturnObject(link)
def update_object_tag(self, client, objects_list, tag_id, dry_run=False):
    """Link an existing tag annotation to each dataset/image in a list.

    :param client: omero client whose session provides the update service
    :param objects_list: iterable of ``omero.model.DatasetI`` /
        ``omero.model.ImageI`` objects
    :param tag_id: id of an existing TagAnnotation to attach
    :param dry_run: when True, only report what would be linked; nothing
        is saved to the server
    """
    for om_object in objects_list:
        link = None
        output_str = "Tag ID to link object to: {}".format(tag_id)
        logging.info(output_str)
        print(output_str)
        if isinstance(om_object, omero.model.DatasetI):
            output_str = "Dataset object ID to link tag to: {}".format(
                om_object.getId().getValue())
            logging.info(output_str)
            print(output_str)
            link = model.DatasetAnnotationLinkI()
            link.setParent(model.DatasetI(om_object.getId(), False))
            link.setChild(model.TagAnnotationI(tag_id, False))
        elif isinstance(om_object, omero.model.ImageI):
            output_str = "Image object ID to link tag to: {}".format(
                om_object.getId().getValue())
            logging.info(output_str)
            print(output_str)
            link = model.ImageAnnotationLinkI()
            link.setParent(model.ImageI(om_object.getId(), False))
            link.setChild(model.TagAnnotationI(tag_id, False))
        # skip the save for dry runs, or when the object type is not
        # supported (saving a None link would fail)
        if dry_run or link is None:
            continue
        try:
            client.getSession().getUpdateService().saveAndReturnObject(link)
        except omero.ValidationException:
            # catch error if there's already a link between objects; this
            # happens if the objects have been retrieved by tag label
            # text, rather than by ID, for example
            output_str = (
                'Error linking tag ID {} and object ID {}; probably already linked'
                .format(tag_id, om_object.getId().getValue()))
            print(output_str)
            logging.error(output_str)
def add_kvps(self, key_value_data, object_type, object_id):
    """Attach key-value pairs (a map annotation) to the given object.

    :param key_value_data: iterable of sequences; element ``[0]`` is the
        key and the remaining elements become the value
    :param object_type: ``str(OMERODataType.project|dataset|image)``
    :param object_id: id of the object to annotate
    """
    # use the stateless update service instead of blitz
    new_map_anno = model.MapAnnotationI()
    # use the 'client' namespace to allow editing in Insight & web
    namespace = constants.metadata.NSCLIENTMAPANNOTATION
    new_map_anno.setNs(rtypes.rstring(namespace))
    # NOTE(review): str(i[1:]) stringifies the *list* of trailing elements
    # (e.g. "['v']"); if a plain value is wanted this should probably be
    # a join of the elements — TODO confirm intended format
    key_value_data = [
        model.NamedValue(i[0], str(i[1:])) for i in key_value_data
    ]
    new_map_anno.setMapValue(key_value_data)
    update_service = self.SESSION.getUpdateService()
    map_anno = update_service.saveAndReturnObject(new_map_anno)
    # sanity-check that the target object exists before linking
    self.retrieve_objects(object_type, [int(object_id)])
    if object_type == str(OMERODataType.project):
        link = model.ProjectAnnotationLinkI()
        link.parent = model.ProjectI(object_id, False)
    elif object_type == str(OMERODataType.dataset):
        link = model.DatasetAnnotationLinkI()
        link.parent = model.DatasetI(object_id, False)
    elif object_type == str(OMERODataType.image):
        link = model.ImageAnnotationLinkI()
        link.parent = model.ImageI(object_id, False)
    else:
        # unknown object type: annotation was saved but cannot be linked
        return
    link.child = model.MapAnnotationI(map_anno.id, False)
    update_service.saveAndReturnObject(link)
def upload_dir_as_images(self, omero_session, query_service, update_service,
                         pixels_service, path, dataset=None,
                         convert_to_uint16=False):
    """
    Reads all the images in the directory specified by 'path' and
    uploads them to OMERO as a single multi-dimensional image, placed
    in the specified 'dataset'.

    Uses regex (via run_regex_search) to determine the Z, C, T position
    of each image by name, and therefore determines sizeZ, sizeC, sizeT
    of the new Image.

    @param omero_session the active OMERO session
    @param query_service OMERO query service
    @param update_service OMERO update service (used to save channel
        names and the dataset link)
    @param pixels_service OMERO pixels service (creates the image and
        provides the pixels description)
    @param path the path to the directory containing images.
    @param dataset the OMERO dataset, if we want to put images somewhere.
        omero.model.DatasetI
    @param convert_to_uint16 forwarded to get_pixels_type /
        upload_image_pixels; presumably converts planes to uint16 —
        TODO confirm in those helpers
    @return the id of the created image (as returned by
        pixels_service.createImage)
    """
    # assume 1 image in this folder for now.
    # Make a single map of all images. key is (z,c,t). Value is image path.
    imageMap = {}
    channelSet = set()
    tokens = []
    # other parameters we need to determine
    sizeZ = 1
    sizeC = 1
    sizeT = 1
    zStart = 1      # could be 0 or 1 ?
    tStart = 1
    fullpath = None
    rgb = False
    # process the names and populate our imagemap
    for f in os.listdir(path):
        fullpath = os.path.join(path, f)
        if f.lower().endswith(
                ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff')):
            # verify the file's real type from its content, not just
            # its extension
            import filetype
            ftype = filetype.guess(fullpath)
            if ftype is None:
                PROCESSING_LOG.error('Cannot guess file type!')
                continue
            PROCESSING_LOG.debug('File extension: %s' % ftype.extension)
            PROCESSING_LOG.debug('File MIME type: %s' % ftype.mime)
            if ftype.mime not in ACCEPTED_MIME_TYPES:
                continue
            # parse T/C/Z indices, a name token and a position from the
            # filename
            search_res = self.run_regex_search(fullpath, f)
            tSearch, cSearch, zSearch, tokSearch, posSearch = \
                itemgetter('tSearch', 'cSearch', 'zSearch', 'tokSearch',
                           'posSearch')(search_res)
            pos = posSearch.group('pos')
            if f.endswith(".jpg"):
                # jpgs are treated as RGB images downstream
                rgb = True
            # missing T/C/Z matches default to index 0 / channel "0"
            if tSearch is None:
                theT = 0
            else:
                theT = int(tSearch.group('T'))
            if cSearch is None:
                cName = "0"
            else:
                cName = cSearch.group('C')
            if zSearch is None:
                theZ = 0
            else:
                theZ = int(zSearch.group('Z'))
            channelSet.add(cName)
            # track the observed Z/T extents
            sizeZ = max(sizeZ, theZ)
            zStart = min(zStart, theZ)
            sizeT = max(sizeT, theT)
            tStart = min(tStart, theT)
            if tokSearch is not None:
                tokens.append(tokSearch.group('Token'))
            imageMap[(theZ, cName, theT)] = fullpath

    # map channel names to colours for the new image
    chans_map = self.find_channel_map(rgb, channelSet)
    channels, colourMap = itemgetter('channels', 'colourMap')(chans_map)
    sizeC = len(channels)

    # use the common stem as the image name
    # imageName = os.path.commonprefix(tokens).strip('0T_')
    # imageName = os.path.commonprefix(tokens).strip('_00')
    # NOTE: pos comes from the last matched file in the loop above; if
    # no file matched, this raises NameError
    imageName = pos
    description = "Imported from images in %s" % path
    PROCESSING_LOG.info("Creating image: %s" % imageName)

    # use the last image to get X, Y sizes and pixel type
    if rgb:
        plane = script_utils.getPlaneFromImage(fullpath, 0)
    else:
        plane = script_utils.getPlaneFromImage(fullpath)
    pixelsType = self.get_pixels_type(plane, query_service,
                                      convert_to_uint16)
    sizeY, sizeX = plane.shape
    PROCESSING_LOG.debug(
        "sizeX: %s sizeY: %s sizeZ: %s sizeC: %s sizeT: %s" %
        (sizeX, sizeY, sizeZ, sizeC, sizeT))

    # code below here is very similar to combineImages.py
    # create an image in OMERO and populate the planes with numpy 2D arrays
    channelList = list(range(sizeC))
    imageId = pixels_service.createImage(sizeX, sizeY, sizeZ, sizeT,
                                         channelList, pixelsType, imageName,
                                         description)
    pixelsId = self.upload_image_pixels(imageId, query_service,
                                        omero_session, pixels_service, rgb,
                                        imageMap, sizeC, sizeZ, sizeT, sizeX,
                                        sizeY, channels, colourMap,
                                        convert_to_uint16)

    # add channel names
    pixels = pixels_service.retrievePixDescription(pixelsId)
    i = 0
    # c is an instance of omero.model.ChannelI
    for c in pixels.iterateChannels():
        # returns omero.model.LogicalChannelI
        lc = c.getLogicalChannel()
        lc.setName(rtypes.rstring(channels[i]))
        update_service.saveObject(lc)
        i += 1

    # put the image in dataset, if specified.
    if dataset:
        link = model.DatasetImageLinkI()
        link.parent = model.DatasetI(dataset.id.val, False)
        link.child = model.ImageI(imageId, False)
        update_service.saveAndReturnObject(link)

    print(': '.join(
        ["Hypercube successfully uploaded", str(imageId.getValue())]))
    return imageId