def uploadImageToDataset(session, localImage, dataset=None, description="", imageName=None): """ Uploads a local Spider image to an OMERO dataset. Same function exists in spider2omero.py. @param services Map of OMERO services @param pixelsType The OMERO PixelsType object for new image. @param imageName The local image path/name. Also used for new image name. @param dataset Dataset to put images in, if specified. omero.model.Dataset """ if imageName == None: imageName = localImage print "Importing image: %s" % imageName em = EMData() em.read_image(localImage) npArray = EMNumPy.em2numpy(em) if len(npArray.shape) < 3: plane2Dlist = [npArray] else: plane2Dlist = npArray image = scriptUtil.createNewImage(session, plane2Dlist, imageName, description, dataset) return image
def createTestImage(session, imageName="imageName"):
    """Create a 16x16 uint8 gradient test image on the server and return its ID."""
    testPlane = arange(256, dtype=uint8).reshape(16, 16)
    newImage = scriptUtil.createNewImage(
        session, [testPlane], imageName, "description", dataset=None)
    return newImage.getId().getValue()
def createTestImage(session, imageName="imageName"):
    """
    Create a 16x16 uint8 gradient test image on the server.

    NOTE(review): this duplicates (and, being defined later, shadows) the
    createTestImage variant above - consider keeping only one of them.

    @param session    OMERO session used to create the image
    @param imageName  Name for the new image (generalized from the previous
                      hard-coded "imageName"; default keeps old behavior)
    @return           The ID of the new image
    """
    plane2D = arange(256, dtype=uint8).reshape(16, 16)
    image = scriptUtil.createNewImage(session, [plane2D], imageName, "description", dataset=None)
    return image.getId().getValue()
def uploadImageToDataset(services, pixelsType, imageArray, imageName, dataset=None): """ Uploads a local Spider image to an OMERO dataset. Same function exists in spider2omero.py. @param services Map of OMERO services @param pixelsType The OMERO PixelsType object for new image. @param imageArray Numpy array of pixel data - 2D @param imageName The local file, for getting image header info @param dataset Dataset to put images in, if specified. omero.model.Dataset """ session = services["session"] queryService = services["queryService"] updateService = services["updateService"] rawFileStore = services["rawFileStore"] namespace = omero.constants.namespaces.NSCOMPANIONFILE fileName = omero.constants.annotation.file.ORIGINALMETADATA print "Importing image: %s" % imageName description = "" if len(imageArray.shape) > 2: plane2Dlist = imageArray # 3D array already. TODO: Need to check that volume is not mirrored (Z in correct order) else: plane2Dlist = [imageArray] # single plane image name = os.path.basename(imageName) image = scriptUtil.createNewImage(session, plane2Dlist, name, description, dataset) # header is a list of values corresponding to attributes header = getSpiderHeader(imageName) # if we know the pixel size, set it in the new image if len(header) >= 38: physicalSizeX = header[38] physicalSizeY = header[38] pixels = image.getPrimaryPixels() pixels.setPhysicalSizeX(rdouble(physicalSizeX)) pixels.setPhysicalSizeY(rdouble(physicalSizeY)) updateService.saveObject(pixels) # make a temp text file. f = open(fileName, 'w') f.write("[GlobalMetadata]\n") # now add image attributes as "Original Metadata", sorted by key. for i, h in enumerate(header): if i in spiderHeaderMap: f.write("%s=%s\n" % (spiderHeaderMap[i], h)) f.close() scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace) # delete temp file os.remove(fileName)
def uploadImageToDataset(session, services, pixelsType, localImage, dataset=None, description="", imageName=None): """ Uploads a local Spider image to an OMERO dataset. Same function exists in spider2omero.py. @param services Map of OMERO services @param pixelsType The OMERO PixelsType object for new image. @param imageName The local image path/name. Also used for new image name. @param dataset Dataset to put images in, if specified. omero.model.Dataset """ renderingEngine = services["renderingEngine"] queryService = services["queryService"] pixelsService = services["pixelsService"] rawPixelStore = services["rawPixelStore"] updateService = services["updateService"] rawFileStore = services["rawFileStore"] namespace = omero.constants.namespaces.NSCOMPANIONFILE fileName = omero.constants.annotation.file.ORIGINALMETADATA if imageName == None: imageName = localImage print "Importing image: %s" % imageName plane2D = spider2array(localImage) plane2Dlist = [plane2D] # single plane image image = scriptUtil.createNewImage(session, plane2Dlist, imageName, description, dataset) # header is a list of values corresponding to attributes header = getSpiderHeader(localImage) # if we know the pixel size, set it in the new image if len(header) >= 38: physicalSizeX = header[38] physicalSizeY = header[38] pixels = image.getPrimaryPixels() pixels.setPhysicalSizeX(rdouble(physicalSizeX)) pixels.setPhysicalSizeY(rdouble(physicalSizeY)) updateService.saveObject(pixels) # make a temp text file. f = open(fileName, 'w') f.write("[GlobalMetadata]\n") # now add image attributes as "Original Metadata", sorted by key. for i, h in enumerate(header): if i in spiderHeaderMap: f.write("%s=%s\n" % (spiderHeaderMap[i], h)) f.close() scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace) # delete temp file os.remove(fileName) return image
def importImage(session, imagePath, imageName=None, planeData=None):
    """
    Create a new OMERO image from a local image file OR from a pre-loaded
    2D plane of pixel data.

    @param session    OMERO session used to create the image
    @param imagePath  Path to a local image; if None, planeData is used instead
    @param imageName  Name for the new image; defaults to imagePath
    @param planeData  2D plane of pixel data, used when imagePath is None
    @return           The new omero.model.Image
    """
    if imagePath is not None:
        data = getPlaneFromImage(imagePath)
        if len(data.shape) == 3:
            # Assume (height, width, channels) and take the first channel.
            # The previous code used data[0], which - as its own comment
            # admitted - sliced the wrong axis, giving a single row with
            # 3 channels. TODO confirm the channel axis against callers.
            plane2D = data[:, :, 0]
        else:
            plane2D = data
    else:
        plane2D = planeData

    if imageName is None:
        imageName = imagePath
    image = scriptUtil.createNewImage(session, [plane2D], imageName, "description", dataset=None)
    return image
def upload_image(self, file_to_upload, dataset, import_original=True, cli=None):
    """
    Upload a local image file to OMERO.

    @param file_to_upload   Path to the local image file
    @param dataset          Dataset object to put the image in (or falsy for none)
    @param import_original  If True (and cli given), import via the OMERO CLI
                            importer; otherwise upload pixel data through the
                            RawPixelsStore route.
    @param cli              OMERO CLI object used for original-file imports
    @return                 New image ID for pixel-data uploads, else None
    """
    valid_image = False
    file_mime_type = None
    image = None
    if file_to_upload.lower().endswith(
            ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff')):
        import filetype
        ftype = filetype.guess(file_to_upload)
        if ftype is None:
            # Bug fix: the original fell through and dereferenced
            # ftype.extension below, raising AttributeError.
            BROKER_LOG.error('Cannot guess file type!')
            valid_image = False
        else:
            BROKER_LOG.debug('File extension: %s' % ftype.extension)
            BROKER_LOG.debug('File MIME type: %s' % ftype.mime)
            if ftype.mime not in self.ACCEPTED_MIME_TYPES:
                valid_image = False
            else:
                valid_image = True
                file_mime_type = ftype.mime
    else:
        valid_image = False

    if valid_image:
        # convert image to 2DArray for plane
        im = Image.open(file_to_upload)
        # planes = [np.array(im)]
        filename_w_ext = os.path.basename(file_to_upload)
        filename, file_extension = os.path.splitext(filename_w_ext)
        if import_original and cli is not None:
            # use the function that follows if uploading images as original files (i.e. as imports)
            # conn = gateway.BlitzGateway(client_obj=self.CLIENT)
            # conn = self.get_connection()
            if dataset:
                target = "Dataset:id:" + str(dataset.id.getValue())
                cli.onecmd([
                    "import", "--clientdir",
                    "/home/jovyan/work/OMERO.server-5.4.10-ice36-b105/lib/client",
                    '-T', target, '--description', "an image", "--quiet",
                    '--no-upgrade-check', file_to_upload
                ])
            else:
                cli.onecmd([
                    "import", "--clientdir",
                    "/home/jovyan/work/OMERO.server-5.4.10-ice36-b105/lib/client",
                    '--description', "an image", '--no-upgrade-check',
                    "--quiet", file_to_upload
                ])
        else:
            planes = script_utils.getPlaneFromImage(
                imagePath=file_to_upload, rgbIndex=None)
            # Use below function if uploading images in RawPixelsStore format (i.e. not the original file import)
            image = script_utils.createNewImage(self.SESSION, [planes],
                                                filename, "An image", dataset)
        if image is not None:
            print(': '.join([
                "Image file successfully uploaded",
                str(image.getId().getValue())
            ]))
            return image.getId().getValue()
        else:
            return
def uploadBdbAsDataset(infile, dataset): """ @param infile path to bdb (absolute OR from where we are running) OR this can be a list of image paths. @param dataset Dataset to put images in (omero.model.DatasetI) """ imageList = None # particleExt will be "ptcls" or "flip" or "wiener" if we are importing original particles # particleExt will be "data" or "flipped" or "filtered" if we are importing sets particles particleExt = None nimg = 0 try: nimg = EMUtil.get_image_count(infile) # eg images in bdb 'folder' particleExt = infile.split("_")[-1] print "Found %d %s images to import from: %s to dataset: %s" % (nimg, particleExt, infile, dataset.name.val) except: nimg = len(infile) # OK, we're probably dealing with a list imageList = infile print "Importing %d images to dataset: %s" % (nimg, dataset.name.val) if nimg == 0: return d = EMData() # use first image to get data-type (assume all the same!) if imageList: d.read_image(imageList[0]) else: d.read_image(infile, 0) plane2D = EMNumPy.em2numpy(d) pType = plane2D.dtype.name print pType pixelsType = queryService.findByQuery("from PixelsType as p where p.value='%s'" % pType, None) # omero::model::PixelsType if pixelsType == None and pType.startswith("float"): # try 'float' pixelsType = queryService.findByQuery("from PixelsType as p where p.value='%s'" % "float", None) # omero::model::PixelsType if pixelsType == None: print "Unknown pixels type for: " % pType return else: print "Using pixels type ", pixelsType.getValue().getValue() # identify the original metadata file with these values namespace = omero.constants.namespaces.NSCOMPANIONFILE origFilePath = omero.constants.annotation.file.ORIGINALMETADATAPREFIX #"/openmicroscopy.org/omero/image_files/" fileName = omero.constants.annotation.file.ORIGINALMETADATA # loop through all the images. 
nimg = min(50, nimg) for i in range(nimg): description = "Imported from EMAN2 bdb: %s" % infile newImageName = "" if imageList: h, newImageName = os.path.split(imageList[i]) print "\nReading image: %s (%s / %s)" % (imageList[i], i, nimg) d.read_image(imageList[i]) else: newImageName = "%d" % i print "\nReading image: %s / %s" % (i, nimg) d.read_image(infile, i) plane2D = EMNumPy.em2numpy(d) #display(d) #plane2D *= 100 # temporary hack to avoid rendering problem with small numbers. #planeMin = int(plane2D.min()) #plane2D -= planeMin # make min = 0 #print plane2D plane2Dlist = [plane2D] # single plane image # test attributes for source image link attributes = d.get_attr_dict() particleSource = "" if "ptcl_source_image" in attributes: parentName = attributes["ptcl_source_image"] newImageName = parentName # name the particle after it's parent description = description + "\nSource Image: %s" % parentName particleSource += parentName if parentName in newImageMap: #print "Add link to image named: ", parentName # simply add to description, since we don't have Image-Image links yet description = description + "\nSource Image ID: %s" % newImageMap[parentName] if "ptcl_source_coord" in attributes: try: x, y = attributes["ptcl_source_coord"] particleSource = "%s.%d.%d" % (particleSource, x, y) xCoord = float(x) yCoord = float(y) description = description + "\nSource Coordinates: %.1f, %.1f" % (xCoord, yCoord) except: pass # if we are importing the reference images for class averages, add link to original particle if particleExt != None and particleExt.endswith("all4"): particleid = "%s.%s" % (particleSource, "ptcls") # 'ptcls' links to original particles. print "Adding link from all4 to original particle", particleid if particleid in newImageMap: description = description + "\nParticle Image ID: %s" % newImageMap[particleid] # if this particle has been imported already, simple put it in the dataset... if "data_path" in attributes: if particleExt in particleSetExtMap: # E.g. 
"data" originalParticleExt = particleSetExtMap[particleExt] # E.g. "ptcls" particleSource += ".%s" % originalParticleExt if particleSource in newImageMap: print particleSource, "already imported..." particleId = newImageMap[particleSource] link = omero.model.DatasetImageLinkI() link.parent = omero.model.DatasetI(dataset.id.val, False) link.child = omero.model.ImageI(particleId, False) updateService.saveAndReturnObject(link) continue # if we are dealing with a class average: if "class_ptcl_idxs" in attributes: particleIndexes = attributes["class_ptcl_idxs"] omeroIds = [] for index in particleIndexes: if index in all4map: omeroIds.append(all4map[index]) ds = createDataset("class %s"%i, project=None, imageIds=omeroIds) description += "\nMember particles in Dataset ID: %s" % ds.id.val # create new Image from numpy data. print "Creating image in OMERO and uploading data..." image = scriptUtil.createNewImage(session, plane2Dlist, newImageName, description, dataset) imageId = image.getId().getValue() # if we know the pixel size, set it in the new image if "apix_x" in attributes: physicalSizeX = float(attributes["apix_x"]) print "physicalSizeX" , physicalSizeX if "apix_y" in attributes: physicalSizeY = float(attributes["apix_y"]) print "physicalSizeY" , physicalSizeY else: physicalSizeY = physicalSizeX pixels = image.getPrimaryPixels() pixels.setPhysicalSizeX(rdouble(physicalSizeX)) pixels.setPhysicalSizeY(rdouble(physicalSizeY)) updateService.saveObject(pixels) # make a map of name: imageId, for creating image links if particleExt != None and particleExt.endswith("all4"): all4map[i] = imageId elif particleSource: particleSource += ".%s" % particleExt print particleSource, "added to map" newImageMap[particleSource] = imageId else: print newImageName, "added to map" newImageMap[newImageName] = imageId f = open(fileName, 'w') # will overwrite each time. f.write("[GlobalMetadata]\n") # now add image attributes as "Original Metadata", sorted by key. 
keyList = list(attributes.keys()) keyList.sort() for k in keyList: #print k, attributes[k] f.write("%s=%s\n" % (k, attributes[k])) f.close() filePath = "%s%s/%s" % (origFilePath, imageId, fileName) print "Uploading %s to Image: %s with path: %s" % (fileName, imageId, filePath) scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace, filePath) # delete temp file if os.path.exists(fileName): os.remove(fileName)
def uploadBdbsAsDataset(services, bdbContainer, imageIds, project = None, info = None): """ This method takes a folder that contains multiple bdbs, each representing different ctf output. This is what is generated by the e2ctf.py command, when it is running on images not in a bdb. Output images (in the form of bdbs) are placed in a 'particles' folder in the current directory. This method uploads all the images in the given directory into a new dataset per bdb. @param bdbContainer path to bdb. In this script, it is a relative path (folder name) E.g. 'particles' @param imageIds The OMERO image-ids that these particles come from. Add to descriptions @param project if specified, put each dataset into this project (omero.model.ProjectI) @param info Extra info to add to dataset description and image descriptions. """ re = services["renderingEngine"] queryService = services["queryService"] pixelsService = services["pixelsService"] rawPixelStore = services["rawPixelsStore"] updateService = services["updateService"] rawFileStore = services["rawFileStore"] # get the list of bdbs dbs = db_list_dicts('bdb:%s' % bdbContainer) print dbs if len(dbs) == 0: print "No bdb images to upload" return d = EMData() # use first image to get data-type (assume all the same!) dbpath = "bdb:particles#%s" % dbs[0] d.read_image(dbpath, 0) plane2D = EMNumPy.em2numpy(d) namespace = omero.constants.namespaces.NSCOMPANIONFILE fileName = omero.constants.annotation.file.ORIGINALMETADATA # loop through all the images. 
datasets = [] for db in dbs: dbpath = "bdb:particles#%s" % db nimg = EMUtil.get_image_count(dbpath) # eg images in bdb 'folder' print "Found %d images to import from: %s" % (nimg, dbpath) # make a dataset for images dataset = omero.model.DatasetI() dataset.name = rstring(db) dataset.description = rstring(info) dataset = updateService.saveAndReturnObject(dataset) datasets.append(dataset) if project: # and put it in a new project link = omero.model.ProjectDatasetLinkI() link.parent = omero.model.ProjectI(project.id.val, False) link.child = omero.model.DatasetI(dataset.id.val, False) updateService.saveAndReturnObject(link) for i in range(nimg): newImageName = str(db) print "Importing image: %d" % i description = "CTF-corrected image\n" if info: description += " %s\n" % info description += " Original Image ID: %s" % imageIds[i] print "importing from:" , dbpath d.read_image(dbpath, i) plane2D = EMNumPy.em2numpy(d) #print plane2D plane2Dlist = [plane2D] # single plane image image = scriptUtil.createNewImage(plane2Dlist, newImageName, description, dataset) attributes = d.get_attr_dict() # if we know the pixel size, set it in the new image if "apix_x" in attributes: physicalSizeX = float(attributes["apix_x"]) print "physicalSizeX" , physicalSizeX if "apix_y" in attributes: physicalSizeY = float(attributes["apix_y"]) print "physicalSizeY" , physicalSizeY else: physicalSizeY = physicalSizeX pixels = image.getPrimaryPixels() pixels.setPhysicalSizeX(rdouble(physicalSizeX)) pixels.setPhysicalSizeY(rdouble(physicalSizeY)) updateService.saveObject(pixels) f = open(fileName, 'w') # will overwrite each time. f.write("[GlobalMetadata]\n") # now add image attributes as "Original Metadata", sorted by key. 
keyList = list(attributes.keys()) keyList.sort() for k in keyList: #print k, attributes[k] f.write("%s=%s\n" % (k, attributes[k])) if k == "ptcl_source_image": print "Add link to image named: ", attributes[k] f.close() scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace) # delete temp file os.remove(fileName) return datasets