def uploadImageToDataset(session, services, pixelsType, localImage, dataset=None, description="", imageName=None):
    
    """
    Uploads a local Spider image to an OMERO dataset. Same function exists in spider2omero.py.
    
    @param session      The OMERO session
    @param services     Map of OMERO services
    @param pixelsType   The OMERO PixelsType object for the new image.
    @param localImage   Path to the local Spider image to import.
    @param dataset      Dataset to put images in, if specified. omero.model.Dataset
    @param description  Description for the new image.
    @param imageName    Name for the new image. Defaults to localImage if not given.
    """
    
    renderingEngine = services["renderingEngine"]
    queryService = services["queryService"]
    pixelsService = services["pixelsService"]
    rawPixelStore = services["rawPixelStore"]
    updateService = services["updateService"]
    rawFileStore = services["rawFileStore"]

    
    namespace = omero.constants.namespaces.NSCOMPANIONFILE 
    fileName = omero.constants.annotation.file.ORIGINALMETADATA
    
    if imageName == None:  imageName = localImage
    print "Importing image: %s" % imageName
    plane2D = spider2array(localImage)
    plane2Dlist = [plane2D]        # single plane image
    
    image = scriptUtil.createNewImage(session, plane2Dlist, imageName, description, dataset)
    
    # header is a list of values corresponding to attributes 
    header = getSpiderHeader(localImage)
    
    # if we know the pixel size, set it in the new image
    if len(header) > 38:
        physicalSizeX = header[38]
        physicalSizeY = header[38]
        pixels = image.getPrimaryPixels()
        pixels.setPhysicalSizeX(rdouble(physicalSizeX))
        pixels.setPhysicalSizeY(rdouble(physicalSizeY))
        updateService.saveObject(pixels)
    
    # make a temp text file. 
    f = open(fileName, 'w')
    f.write("[GlobalMetadata]\n")

    # now add image attributes as "Original Metadata", sorted by key. 
    for i, h in enumerate(header):
        if i in spiderHeaderMap:
            f.write("%s=%s\n" % (spiderHeaderMap[i], h))
            
    f.close()

    scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace)
    # delete temp file
    os.remove(fileName)
    return image
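A minimal usage sketch (not from the original source), showing how the services map consumed by uploadImageToDataset() above might be assembled from an OMERO session; the file path, description, and the 'float' pixels-type query are placeholders/assumptions.

def example_upload_spider_image(session, dataset):
    # Assemble the services map expected by uploadImageToDataset() above.
    services = {
        "renderingEngine": session.createRenderingEngine(),
        "queryService": session.getQueryService(),
        "pixelsService": session.getPixelsService(),
        "rawPixelStore": session.createRawPixelsStore(),
        "updateService": session.getUpdateService(),
        "rawFileStore": session.createRawFileStore(),
    }
    # Look up a PixelsType, following the findByQuery pattern used later in this listing.
    pixelsType = services["queryService"].findByQuery(
        "from PixelsType as p where p.value='float'", None)
    # 'particle001.spi' is a placeholder path to a local Spider image.
    return uploadImageToDataset(session, services, pixelsType, "particle001.spi",
                                dataset=dataset, description="Imported Spider particle")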
def uploadImageToDataset(services, pixelsType, imageArray, imageName, dataset=None):
    
    """
    Uploads a Spider image (pixel data as a numpy array) to an OMERO dataset. Same function exists in spider2omero.py.
    
    @param services     Map of OMERO services
    @param pixelsType   The OMERO PixelsType object for new image.
    @param imageArray   Numpy array of pixel data - 2D (or 3D for a stack of planes)
    @param imageName    The local file path, used for the new image name and for reading the Spider header
    @param dataset      Dataset to put images in, if specified. omero.model.Dataset
    """

    session = services["session"]
    queryService = services["queryService"]
    updateService = services["updateService"]
    rawFileStore = services["rawFileStore"]
    
    namespace = omero.constants.namespaces.NSCOMPANIONFILE 
    fileName = omero.constants.annotation.file.ORIGINALMETADATA
    
    print "Importing image: %s" % imageName
    description = ""
    if len(imageArray.shape) > 2:
        plane2Dlist = imageArray    # 3D array already. TODO: Need to check that volume is not mirrored (Z in correct order)
    else:
        plane2Dlist = [imageArray]  # single plane image
    
    name = os.path.basename(imageName)
    image = scriptUtil.createNewImage(session, plane2Dlist, name, description, dataset)
    
    # header is a list of values corresponding to attributes 
    header = getSpiderHeader(imageName)
    
    # if we know the pixel size, set it in the new image
    if len(header) > 38:
        physicalSizeX = header[38]
        physicalSizeY = header[38]
        pixels = image.getPrimaryPixels()
        pixels.setPhysicalSizeX(rdouble(physicalSizeX))
        pixels.setPhysicalSizeY(rdouble(physicalSizeY))
        updateService.saveObject(pixels)
    
    # make a temp text file. 
    f = open(fileName, 'w')
    f.write("[GlobalMetadata]\n")

    # now add image attributes as "Original Metadata", sorted by key. 
    for i, h in enumerate(header):
        if i in spiderHeaderMap:
            f.write("%s=%s\n" % (spiderHeaderMap[i], h))
            
    f.close()

    scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace)
    # delete temp file
    os.remove(fileName)
def save_as_cecog():
    """
    The main entry point of the script, as called by the client via the scripting service, passing the required parameters. 
    """
    
    client = scripts.client('Save_as_Cecog.py', """Script takes a single image and saves it as single plane tiff files, named according to 
    the MetaMorph_PlateScanPackage as used by Cecog Analyzer""", 
    
    scripts.Long("Image_ID", optional=False, grouping="1",
        description="The Image you want to Save As Cecog"),
        
    version = "4.2.1",
    authors = ["William Moore", "OME Team"],
    institutions = ["University of Dundee"],
    contact = "*****@*****.**",
    )

    try:
        session = client.getSession()

        # process the list of args above.
        parameterMap = {}
        for key in client.getInputKeys():
            if client.getInput(key):
                parameterMap[key] = client.getInput(key).getValue()

        print parameterMap

        queryService = session.getQueryService()
        updateService = session.getUpdateService()
        rawFileStore = session.createRawFileStore()
        
        curr_dir = os.getcwd()
        tiff_dir = os.path.join(curr_dir, "cecogZip")
        os.mkdir(tiff_dir)
        
        imageId = parameterMap["Image_ID"]
        image = queryService.get("Image", imageId)
        
        print "Downloading tiffs to %s" % tiff_dir
        
        split_image(client, imageId, tiff_dir, unformattedImageName = "cecog_P001_T%05d_C%s_Z%d_S1.tif", dims = ('T', 'C', 'Z'))

        zip_file_name = "Image_%s_tiffs.zip" % imageId
        zip_file = os.path.join(curr_dir, zip_file_name)
        compress(zip_file, tiff_dir)
        
        fileAnnotation = None
        if os.path.exists(zip_file):
            fileAnnotation = script_utils.uploadAndAttachFile(queryService, updateService, rawFileStore, image, zip_file, mimetype="zip")
        
        if fileAnnotation:
            client.setOutput("Message", rstring("Cecog Zip Created"))
            client.setOutput("File_Annotation", robject(fileAnnotation))
        else:
            client.setOutput("Message", rstring("Save failed - see Errors"))
            
    finally:
        client.closeSession()
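A hedged sketch of how the Save_as_Cecog script could be launched from a client, following the runScript() helper and rtype argument-map pattern used by the test code later in this listing; the helper, script ID and image ID are assumptions.

def example_run_save_as_cecog(scriptService, client, scriptId, imageId):
    # Only Image_ID is required by Save_as_Cecog.py; rtypes wrap the value,
    # as in the argument maps used by the tests later in this listing.
    argMap = {
        "Image_ID": omero.rtypes.rlong(imageId),
    }
    runScript(scriptService, client, scriptId, argMap)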
def runSegger(session, parameterMap):
    """
    This is where the action happens.
    We get the parameters, download the image, write segger_nogui.py file and 
    pass this to Chimera from command line. 
    Then look for the generated .seg file and attach it to the image. 
    """
    
    # create services we need 
    queryService = session.getQueryService()
    rawPixelStore = session.createRawPixelsStore()
    rawFileStore = session.createRawFileStore()
    updateService = session.getUpdateService()
    
    # required parameters
    imageId = long(parameterMap["Image_ID"])
    image = queryService.get("Image", imageId)
    threshold = parameterMap["Threshold"]
    
    # optional parameters
    numit = 3
    if "Smoothing_Steps" in parameterMap:
        numit = parameterMap["Smoothing_Steps"]
    sdev = 1
    if "Standard_Deviation" in parameterMap:
        sdev = parameterMap["Standard_Deviation"]
    targNRegs = 1   # not sure what this default should be
    if "Target_Region_Count" in parameterMap:
        targNRegs = parameterMap["Target_Region_Count"]
    
    # local file names - indicate parameters
    name = "thr%.2fss%ssd%strc%s" % (threshold, numit, sdev, targNRegs)
    inputName = "%s.mrc" % name
    outputName = "%s.seg" % name
    
    downloadImage(queryService, rawPixelStore, imageId, inputName)
    if not os.path.exists(inputName):
        print "Failed to download image as %s" % inputName
    
    # write out a python file...
    scriptName = "segger_nogui.py"
    writePythonScript(scriptName, threshold, numit, sdev, targNRegs)
    if not os.path.exists(scriptName):
        print "Failed to write script file as %s" % scriptName
    
    chimeraCmd = "chimera --nogui %s %s" % (inputName, scriptName)
    print chimeraCmd
    
    os.system(chimeraCmd)
        
    # upload segger file
    if not os.path.exists(outputName):
        print "Segger file not created by Chimera from Input Image ID: %s" % imageId
    else:
        origFile = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, outputName, "application/octet-stream")
        return origFile
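A minimal parameter-map sketch for runSegger() above; the threshold and optional values are placeholders mirroring the defaults in the function, not recommended settings.

def example_run_segger(session, imageId):
    # Image_ID and Threshold are required; the optional keys fall back to
    # the defaults shown in runSegger() above if omitted.
    parameterMap = {
        "Image_ID": imageId,
        "Threshold": 0.8,
        "Smoothing_Steps": 3,
        "Standard_Deviation": 1,
        "Target_Region_Count": 1,
    }
    return runSegger(session, parameterMap)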
Example #5
def attachFileToImage(gateway, queryService, updateService, rawFileStore,
                      imageID, localName):
    image = getImage(gateway, imageID)
    return script_utils.uploadAndAttachFile(queryService,
                                            updateService,
                                            rawFileStore,
                                            image,
                                            localName,
                                            'CSV',
                                            description=None,
                                            namespace=NAMESPACE,
                                            origFilePathName=None)
Example #6
def attachFileToProject(containerService, queryService, updateService,
                        rawFileStore, projectId, localName):
    project = getProject(containerService, projectId)
    return script_utils.uploadAndAttachFile(queryService,
                                            updateService,
                                            rawFileStore,
                                            project,
                                            localName,
                                            'PDF',
                                            description=None,
                                            namespace=None,
                                            origFilePathName=None)
Example #7
def attachFileToDataset(containerService, queryService, updateService,
                        rawFileStore, datasetID, localName):
    dataset = getDataset(containerService, datasetID)
    return script_utils.uploadAndAttachFile(queryService,
                                            updateService,
                                            rawFileStore,
                                            dataset,
                                            localName,
                                            'PDF',
                                            description=None,
                                            namespace=None,
                                            origFilePathName=None)
Example #8
def attachFileToImage(gateway, queryService, updateService, rawFileStore, imageID, localName):
    image = getImage(gateway, imageID)
    return script_utils.uploadAndAttachFile(
        queryService,
        updateService,
        rawFileStore,
        image,
        localName,
        "CSV",
        description=None,
        namespace=NAMESPACE,
        origFilePathName=None,
    )
Example #9
def attachFileToProject(containerService, queryService, updateService, rawFileStore, projectId, localName):
    project = getProject(containerService, projectId)
    return script_utils.uploadAndAttachFile(
        queryService,
        updateService,
        rawFileStore,
        project,
        localName,
        "PDF",
        description=None,
        namespace=None,
        origFilePathName=None,
    )
Example #10
def attachFileToDataset(containerService, queryService, updateService, rawFileStore, datasetID, localName):
    dataset = getDataset(containerService, datasetID)
    return script_utils.uploadAndAttachFile(
        queryService,
        updateService,
        rawFileStore,
        dataset,
        localName,
        "PDF",
        description=None,
        namespace=None,
        origFilePathName=None,
    )
Example #11
    def testRunSpiderProcedure(self):
        """
        Tests the runSpiderProcedure.py script by uploading a simple Spider Procedure File (spf) to 
        OMERO as an Original File, creating an image in OMERO, then
        running the export2em.py script from command line and checking that an image has been exported. 
        The saveImageAs.py script is first uploaded to the scripting service, since this is required by export2em.py
        """
        print "testRunSpiderProcedure"

        # root session is root.sf
        uuid = self.root.sf.getAdminService().getEventContext().sessionUuid
        admin = self.root.sf.getAdminService()

        # upload saveImageAs.py script as root
        scriptService = self.root.sf.getScriptService()
        scriptId = self.uploadScript(scriptService, runSpiderScriptPath)

        session = self.root.sf  # do everything as root for now

        # create services
        queryService = session.getQueryService()
        updateService = session.getUpdateService()
        rawFileStore = session.createRawFileStore()
        containerService = session.getContainerService()

        # import image
        image = importImage(session, smallTestImage)
        iId = image.getId().getValue()

        # put image in dataset and project
        # create dataset
        dataset = omero.model.DatasetI()
        dataset.name = rstring("spider-test")
        dataset = updateService.saveAndReturnObject(dataset)
        # create project
        project = omero.model.ProjectI()
        project.name = rstring("spider-test")
        project = updateService.saveAndReturnObject(project)
        # put dataset in project
        link = omero.model.ProjectDatasetLinkI()
        link.parent = omero.model.ProjectI(project.id.val, False)
        link.child = omero.model.DatasetI(dataset.id.val, False)
        updateService.saveAndReturnObject(link)
        # put image in dataset
        dlink = omero.model.DatasetImageLinkI()
        dlink.parent = omero.model.DatasetI(dataset.id.val, False)
        dlink.child = omero.model.ImageI(image.id.val, False)
        updateService.saveAndReturnObject(dlink)

        # create and upload a Spider Procedure File
        # make a temp text file. Example from http://www.wadsworth.org/spider_doc/spider/docs/quickstart.html
        f = open("spider.spf", 'w')
        f.write("RT\n")
        f.write("test001\n")
        f.write("rot001\n")
        f.write("60\n")
        f.write("\n")
        f.write("IP\n")
        f.write("rot001\n")
        f.write("big001\n")
        f.write("150,150\n")
        f.write("\n")
        f.write("WI\n")
        f.write("big001\n")
        f.write("win001\n")
        f.write("75,75\n")
        f.write("1,75\n")
        f.write("\n")
        f.write("EN D")
        f.close()
        fileAnnot = scriptUtil.uploadAndAttachFile(queryService, updateService,
                                                   rawFileStore, image,
                                                   "spider.spf", "text/plain")
        os.remove("spider.spf")

        newDatasetName = "spider-results"
        # run script
        ids = [
            omero.rtypes.rlong(iId),
        ]
        argMap = {
            "IDs": omero.rtypes.rlist(ids),  # process these images
            "Data_Type": omero.rtypes.rstring("Image"),
            "Spf": omero.rtypes.rstring(str(fileAnnot.id.val)),
            "New_Dataset_Name": omero.rtypes.rstring(newDatasetName),
            "Input_Name": omero.rtypes.rstring("test001"),
            "Output_Name": omero.rtypes.rstring("win001")
        }
        runScript(scriptService, self.root, scriptId, argMap)

        # check that image has been created.
        # now we should have a dataset with 1 image, in project
        pros = containerService.loadContainerHierarchy("Project",
                                                       [project.id.val], None)
        datasetFound = False
        for p in pros:
            for ds in p.linkedDatasetList():
                if ds.name.val == newDatasetName:
                    datasetFound = True
                    dsId = ds.id.val
                    iList = containerService.getImages("Dataset", [dsId], None)
                    self.assertEquals(1, len(iList))
        self.assertTrue(datasetFound, "No dataset found with images from ROIs")
Example #12
def writeMovie(commandArgs, session):
    """
    Makes the movie.
    
    @returns        Returns the file annotation
    """
    log("Movie created by OMERO")
    log("")
    gateway = session.createGateway()
    scriptService = session.getScriptService()
    queryService = session.getQueryService()
    updateService = session.getUpdateService()
    rawFileStore = session.createRawFileStore()
    
    omeroImage = gateway.getImage(commandArgs["Image_ID"])
    pixelsList = gateway.getPixelsFromImage(commandArgs["Image_ID"])
    pixels = pixelsList[0]
    pixelsId = pixels.getId().getValue()

    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()
    sizeT = pixels.getSizeT().getValue()

    if (sizeX==None or sizeY==None or sizeZ==None or sizeT==None or sizeC==None):
        return

    if (pixels.getPhysicalSizeX()==None):
        commandArgs["Scalebar"]=0

    cRange = range(0, sizeC)
    if "Channels" in commandArgs and validChannels(commandArgs["Channels"], sizeC):
        cRange = commandArgs["Channels"]

    tzList = calculateRanges(sizeZ, sizeT, commandArgs)

    timeMap = calculateAquisitionTime(session, pixelsId, cRange, tzList)
    if timeMap is None or len(timeMap) == 0:
        commandArgs["Show_Time"] = False

    pixelTypeString = pixels.getPixelsType().getValue().getValue()
    frameNo = 1
    renderingEngine = getRenderingEngine(session, pixelsId, sizeC, cRange)

    overlayColour = (255,255,255)
    if "Overlay_Colour" in commandArgs:
        r,g,b,a = COLOURS[commandArgs["Overlay_Colour"]]
        overlayColour = (r,g,b)
    
    format = commandArgs["Format"]
    fileNames = []
    for tz in tzList:
        t = tz[0]
        z = tz[1]
        plane = getPlane(renderingEngine, z, t)
        planeImage = numpy.array(plane, dtype='uint32')
        planeImage = planeImage.byteswap()
        planeImage = planeImage.reshape(sizeX, sizeY)
        image = Image.frombuffer('RGBA',(sizeX,sizeY),planeImage.data,'raw','ARGB',0,1)
        
        if "Scalebar" in commandArgs and commandArgs["Scalebar"]:
            image = addScalebar(commandArgs["Scalebar"], image, pixels, commandArgs)
        planeInfo = "z:"+str(z)+"t:"+str(t)
        if "Show_Time" in commandArgs and commandArgs["Show_Time"]:
            time = timeMap[planeInfo]
            image = addTimePoints(time, pixels, image, overlayColour)
        if "Show_Plane_Info" in commandArgs and commandArgs["Show_Plane_Info"]:
            image = addPlaneInfo(z, t, pixels, image, overlayColour)
        if format==QT:
            filename = str(frameNo)+'.png'
            image.save(filename,"PNG")
        else:
            filename = str(frameNo)+'.jpg'
            image.save(filename,"JPEG")
        fileNames.append(filename)
        frameNo +=1
        
    filelist= ",".join(fileNames)
        
    ext = formatMap[format]
    movieName = "Movie"
    if "Movie_Name" in commandArgs:
        movieName = commandArgs["Movie_Name"]
    if not movieName.endswith(".%s" % ext):
        movieName = "%s.%s" % (movieName, ext)
        
    framesPerSec = 2
    if "FPS" in commandArgs:
        framesPerSec = commandArgs["FPS"]
    buildAVI(sizeX, sizeY, filelist, framesPerSec, movieName, format)
    figLegend = "\n".join(logLines)
    mimetype = formatMimetypes[format]
    fileAnnotation = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, omeroImage, movieName, mimetype, figLegend)
    return fileAnnotation
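A minimal argument-map sketch for writeMovie() above; 'Format' must be one of the module-level format constants the function checks against (QT is assumed to be defined in the original script), and the remaining keys are optional placeholders.

def example_write_movie(session, imageId):
    # QT is assumed to be one of the module-level format constants that
    # writeMovie() above compares 'Format' against; other keys are optional.
    commandArgs = {
        "Image_ID": imageId,
        "Format": QT,
        "FPS": 2,
        "Movie_Name": "example_movie",
    }
    return writeMovie(commandArgs, session)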
Example #13
def roiFigure(session, commandArgs):    
    """
        This processes the script parameters, adding defaults if needed. 
        Then calls a method to make the figure, and finally uploads and attaches this to the primary image.
        
        @param: session        The OMERO session
        @param: commandArgs        Map of String:Object parameters for the script. 
                                Objects are not rtypes, since getValue() was called when the map was processed below. 
                                But, list and map objects may contain rtypes (need to call getValue())
        
        @return:     the id of the originalFileLink child. (ID object, not value) 
    """
    
    # create the services we're going to need. 
    metadataService = session.getMetadataService()
    queryService = session.getQueryService()
    updateService = session.getUpdateService()
    rawFileStore = session.createRawFileStore()
    containerService = session.getContainerService()
    
    log("ROI figure created by OMERO on %s" % date.today())
    log("")
    
    pixelIds = []
    imageIds = []
    imageLabels = []
    imageNames = {}
    omeroImage = None    # this is set as the first image, to link figure to

    # function for getting image labels.
    def getLabels(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]
        
    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in commandArgs:
        if commandArgs["Image_Labels"] == "Datasets":
            def getDatasets(name, tagsList, pdList):
                return [dataset for project, dataset in pdList]
            getLabels = getDatasets
        elif commandArgs["Image_Labels"] == "Tags":
            def getTags(name, tagsList, pdList):
                return tagsList
            getLabels = getTags
            
    # process the list of images. If imageIds is not set, script can't run. 
    log("Image details:")
    dataType = commandArgs["Data_Type"]
    ids = commandArgs["IDs"]
    images = containerService.getImages(dataType, ids, None)

    for idCount, image in enumerate(images):
        iId = image.getId().getValue()
        imageIds.append(iId)
        if idCount == 0:
            omeroImage = image        # remember the first image to attach figure to
        pixelIds.append(image.getPrimaryPixels().getId().getValue())
        imageNames[iId] = image.getName().getValue()

    if len(imageIds) == 0:
        print "No image IDs specified."
        return
            
    pdMap = figUtil.getDatasetsProjectsFromImages(queryService, imageIds)    # a map of imageId : list of (project, dataset) names. 
    tagMap = figUtil.getTagsFromImages(metadataService, imageIds)
    # Build a legend entry for each image
    for iId in imageIds:
        name = imageNames[iId]
        imageDate = image.getAcquisitionDate().getValue()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]
        
        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s  ID: %d" % (name, iId))
        log("  Date: %s" % date.fromtimestamp(imageDate/1000))
        log("  Tags: %s" % tags)
        log("  Project/Datasets: %s" % pdString)
        
        imageLabels.append(getLabels(name, tagsList, pdList))
    
    # use the first image to define dimensions, channel colours etc. 
    pixelsId = pixelIds[0]
    pixels = queryService.get("Pixels", pixelsId)

    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()
    
    width = sizeX
    if "Width" in commandArgs:
        w = commandArgs["Width"]
        try:
            width = int(w)
        except:
            log("Invalid width: %s Using default value: %d" % (str(w), sizeX))
    
    height = sizeY
    if "Height" in commandArgs:
        h = commandArgs["Height"]
        try:
            height = int(h)
        except:
            log("Invalid height: %s Using default value" % (str(h), sizeY))
            
    log("Image dimensions for all panels (pixels): width: %d  height: %d" % (width, height))
        
    # the channels in the combined image,
    if "Merged_Channels" in commandArgs:
        mergedIndexes = [c-1 for c in commandArgs["Merged_Channels"]]  # convert to 0-based
    else:
        mergedIndexes = range(sizeC) # show all
    mergedIndexes.reverse()
        
    mergedColours = {}    # if no colours added, use existing rendering settings.
    # Actually, nicer to always use existing rendering settings.
    #if "Merged_Colours" in commandArgs:
    #    for i, c in enumerate(commandArgs["Merged_Colours"]):
    #        if c in COLOURS:
    #            mergedColours[i] = COLOURS[c]
    
    algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in commandArgs:
        a = commandArgs["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = omero.constants.projection.ProjectionType.MEANINTENSITY
    
    stepping = 1
    if "Stepping" in commandArgs:
        s = commandArgs["Stepping"]
        if (0 < s < sizeZ):
            stepping = s
    
    scalebar = None
    if "Scalebar" in commandArgs:
        sb = commandArgs["Scalebar"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None
    
    overlayColour = (255,255,255)
    if "Scalebar_Colour" in commandArgs:
        if commandArgs["Scalebar_Colour"] in OVERLAY_COLOURS: 
            r,g,b,a = OVERLAY_COLOURS[commandArgs["Scalebar_Colour"]]
            overlayColour = (r,g,b)
    
    roiZoom = None
    if "Roi_Zoom" in commandArgs:
        roiZoom = float(commandArgs["Roi_Zoom"])
        if roiZoom == 0:
            roiZoom = None
            
    maxColumns = None
    if "Max_Columns" in commandArgs:
        maxColumns = commandArgs["Max_Columns"]
        
    showRoiDuration = False
    if "Show_Roi_Duration" in commandArgs:
        showRoiDuration = commandArgs["Show_Roi_Duration"]
    
    roiLabel = "FigureROI"
    if "Roi_Selection_Label" in commandArgs:
        roiLabel = commandArgs["Roi_Selection_Label"]
                
    spacer = (width/50) + 2
    
    fig = getSplitView(session, imageIds, pixelIds, mergedIndexes, 
            mergedColours, width, height, imageLabels, spacer, algorithm, stepping, scalebar, overlayColour, roiZoom, 
            maxColumns, showRoiDuration, roiLabel)
                                                    
    #fig.show()        # bug-fixing only
    
    log("")
    figLegend = "\n".join(logStrings)
    
    #print figLegend    # bug fixing only
    
    format = JPEG
    if "Format" in commandArgs:
        if commandArgs["Format"] == "PNG":
            format = PNG
            
    output = "movieROIFigure"
    if "Figure_Name" in commandArgs:
        output = str(commandArgs["Figure_Name"])
        
    if format == PNG:
        output = output + ".png"
        fig.save(output, "PNG")
    else:
        output = output + ".jpg"
        fig.save(output)
    
    # Use util method to upload the figure 'output' to the server, attaching it to the omeroImage, adding the 
    # figLegend as the fileAnnotation description. 
    # Returns the id of the originalFileLink child. (ID object, not value)
    fileAnnotation = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, omeroImage, output, format, figLegend)
    return (fileAnnotation, omeroImage)
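A minimal parameter-map sketch for roiFigure() above; since the IDs are passed straight to containerService.getImages(), plain longs (not rtypes) are assumed, and the figure name is a placeholder.

def example_roi_figure(session, imageId):
    # IDs go straight to containerService.getImages() inside roiFigure()
    # above, so plain longs are assumed here; other keys use defaults.
    commandArgs = {
        "Data_Type": "Image",
        "IDs": [imageId],
        "Figure_Name": "example_roiFigure",
    }
    return roiFigure(session, commandArgs)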
Example #14
def movieFigure(session, commandArgs):    
    """
    Makes the figure using the parameters in @commandArgs, attaches the figure to the 
    parent Project/Dataset, and returns the file-annotation ID
    
    @param session        The OMERO session
    @param commandArgs    Map of parameters for the script
    @returns            Returns the id of the originalFileLink child. (ID object, not value)
    """
    
    # create the services we're going to need. 
    metadataService = session.getMetadataService()
    queryService = session.getQueryService()
    updateService = session.getUpdateService()
    rawFileStore = session.createRawFileStore()
    containerService = session.getContainerService()
    
    log("Movie figure created by OMERO on %s" % date.today())
    log("")
    
    timeLabels = {"SECS_MILLIS": "seconds",
                "SECS": "seconds",
                "MINS": "minutes",
                "HOURS": "hours",
                "MINS_SECS": "mins:secs",
                "HOURS_MINS": "hours:mins"}
    timeUnits = "SECS"
    if "timeUnits" in commandArgs:
        timeUnits = commandArgs["timeUnits"]
    if timeUnits not in timeLabels.keys():
        timeUnits = "SECS"
    log("Time units are in %s" % timeLabels[timeUnits])
    
    pixelIds = []
    imageIds = []
    imageLabels = []
    imageNames = {}
    omeroImage = None    # this is set as the first image, to link figure to

    # function for getting image labels.
    def getLabels(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]
        
    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in commandArgs:
        if commandArgs["Image_Labels"] == "Datasets":
            def getDatasets(name, tagsList, pdList):
                return [dataset for project, dataset in pdList]
            getLabels = getDatasets
        elif commandArgs["Image_Labels"] == "Tags":
            def getTags(name, tagsList, pdList):
                return tagsList
            getLabels = getTags
            
    # process the list of images. If imageIds is not set, script can't run. 
    log("Image details:")
    for idCount, imageId in enumerate(commandArgs["IDs"]):
        iId = long(imageId.getValue())
        image = containerService.getImages("Image", [iId], None)[0]
        if image == None:
            print "Image not found for ID:", iId
            continue
        imageIds.append(iId)
        if idCount == 0:
            omeroImage = image        # remember the first image to attach figure to
        pixelIds.append(image.getPrimaryPixels().getId().getValue())
        imageNames[iId] = image.getName().getValue()
        
    if len(imageIds) == 0:
        print "No image IDs specified."
        return
    
    pdMap = figUtil.getDatasetsProjectsFromImages(queryService, imageIds)    # a map of imageId : list of (project, dataset) names. 
    tagMap = figUtil.getTagsFromImages(metadataService, imageIds)
    # Build a legend entry for each image
    for iId in imageIds:
        name = imageNames[iId]
        imageDate = image.getAcquisitionDate().getValue()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]
        
        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s  ID: %d" % (name, iId))
        log("  Date: %s" % date.fromtimestamp(imageDate/1000))
        log("  Tags: %s" % tags)
        log("  Project/Datasets: %s" % pdString)
        
        imageLabels.append(getLabels(name, tagsList, pdList))
    
    
    # use the first image to define dimensions, channel colours etc. 
    pixelsId = pixelIds[0]
    pixels = queryService.get("Pixels", pixelsId)
                
    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()
    sizeT = pixels.getSizeT().getValue()

    tIndexes = []
    if "T_Indexes" in commandArgs:
        for t in commandArgs["T_Indexes"]:
            tIndexes.append(t.getValue())
        print "T_Indexes", tIndexes
    if len(tIndexes) == 0:      # if no t-indexes given, use all t-indices
        tIndexes = range(sizeT)
            
    zStart = -1
    zEnd = -1
    if "Z_Start" in commandArgs:
        zStart = commandArgs["Z_Start"]
    if "Z_End" in commandArgs:
        zEnd = commandArgs["Z_End"]
    
    width = sizeX
    if "Width" in commandArgs:
        width = commandArgs["Width"]
    
    height = sizeY
    if "Height" in commandArgs:
        height = commandArgs["Height"]
    
    spacer = (width/25) + 2
    
    algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in commandArgs:
        a = commandArgs["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = omero.constants.projection.ProjectionType.MEANINTENSITY
    
    stepping = 1
    if "Stepping" in commandArgs:
        s = commandArgs["Stepping"]
        if (0 < s < sizeZ):
            stepping = s
    
    scalebar = None
    if "Scalebar_Size" in commandArgs:
        sb = commandArgs["Scalebar_Size"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None
    
    overlayColour = (255,255,255)
    if "Scalebar_Colour" in commandArgs:
        r,g,b,a = OVERLAY_COLOURS[commandArgs["Scalebar_Colour"]]
        overlayColour = (r,g,b)
                
    figure = createMovieFigure(session, pixelIds, tIndexes, zStart, zEnd, width, height, spacer, 
                            algorithm, stepping, scalebar, overlayColour, timeUnits, imageLabels)
    
    #figure.show()
    
    log("")
    figLegend = "\n".join(logLines)
    
    #print figLegend    # bug fixing only
    
    format = JPEG
    if "Format" in commandArgs:
        if commandArgs["Format"] == "PNG":
            format = PNG
            
    output = "movieFigure"
    if "Figure_Name" in commandArgs:
        output = str(commandArgs["Figure_Name"])
        
    if format == PNG:
        output = output + ".png"
        figure.save(output, "PNG")
    else:
        output = output + ".jpg"
        figure.save(output)
    

    fileAnnotation = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, omeroImage, output, format, figLegend)
    return (fileAnnotation, omeroImage)    
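A minimal parameter-map sketch for movieFigure() above; because the function calls getValue() on each ID, rtype-wrapped longs are assumed, and the figure name is a placeholder.

def example_movie_figure(session, imageIds):
    # movieFigure() above calls getValue() on each ID, so rtype-wrapped
    # longs are assumed here; all other parameters fall back to defaults.
    commandArgs = {
        "IDs": [omero.rtypes.rlong(iId) for iId in imageIds],
        "Figure_Name": "example_movieFigure",
    }
    return movieFigure(session, commandArgs)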
Example #15
 def testRunSpiderProcedure(self):
     """
     Tests the runSpiderProcedure.py script by uploading a simple Spider Procedure File (spf) to 
     OMERO as an Original File, creating an image in OMERO, then
     running the export2em.py script from command line and checking that an image has been exported. 
     The saveImageAs.py script is first uploaded to the scripting service, since this is required by export2em.py
     """
     print "testRunSpiderProcedure"
     
     # root session is root.sf
     uuid = self.root.sf.getAdminService().getEventContext().sessionUuid
     admin = self.root.sf.getAdminService()
     
     # upload saveImageAs.py script as root
     scriptService = self.root.sf.getScriptService()
     scriptId = self.uploadScript(scriptService, runSpiderScriptPath)
     
     session = self.root.sf      # do everything as root for now 
     
     # create services
     queryService = session.getQueryService()
     updateService = session.getUpdateService()
     rawFileStore = session.createRawFileStore()
     containerService = session.getContainerService()
     
     # import image
     image = importImage(session, smallTestImage)
     iId = image.getId().getValue()
     
     # put image in dataset and project
     # create dataset
     dataset = omero.model.DatasetI()
     dataset.name = rstring("spider-test")
     dataset = updateService.saveAndReturnObject(dataset)
     # create project
     project = omero.model.ProjectI()
     project.name = rstring("spider-test")
     project = updateService.saveAndReturnObject(project)
     # put dataset in project 
     link = omero.model.ProjectDatasetLinkI()
     link.parent = omero.model.ProjectI(project.id.val, False)
     link.child = omero.model.DatasetI(dataset.id.val, False)
     updateService.saveAndReturnObject(link)
     # put image in dataset
     dlink = omero.model.DatasetImageLinkI()
     dlink.parent = omero.model.DatasetI(dataset.id.val, False)
     dlink.child = omero.model.ImageI(image.id.val, False)
     updateService.saveAndReturnObject(dlink)
     
     # create and upload a Spider Procedure File
     # make a temp text file. Example from http://www.wadsworth.org/spider_doc/spider/docs/quickstart.html
     f = open("spider.spf", 'w')
     f.write("RT\n")
     f.write("test001\n")
     f.write("rot001\n")
     f.write("60\n")
     f.write("\n")
     f.write("IP\n")
     f.write("rot001\n")
     f.write("big001\n")
     f.write("150,150\n")
     f.write("\n")
     f.write("WI\n")
     f.write("big001\n")
     f.write("win001\n")
     f.write("75,75\n")
     f.write("1,75\n")
     f.write("\n")
     f.write("EN D")
     f.close() 
     fileAnnot = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, "spider.spf", "text/plain")
     os.remove("spider.spf")
     
     newDatasetName = "spider-results"
     # run script
     ids = [omero.rtypes.rlong(iId), ]
     argMap = {"IDs":omero.rtypes.rlist(ids),       # process these images
             "Data_Type": omero.rtypes.rstring("Image"),
             "Spf": omero.rtypes.rstring(str(fileAnnot.id.val)),
             "New_Dataset_Name": omero.rtypes.rstring(newDatasetName),
             "Input_Name": omero.rtypes.rstring("test001"),
             "Output_Name": omero.rtypes.rstring("win001")}
     runScript(scriptService, self.root, scriptId, argMap)
     
     # check that image has been created. 
     # now we should have a dataset with 1 image, in project
     pros = containerService.loadContainerHierarchy("Project", [project.id.val], None)
     datasetFound = False
     for p in pros:
         for ds in p.linkedDatasetList():
             if ds.name.val == newDatasetName:
                 datasetFound = True
                 dsId = ds.id.val
                 iList = containerService.getImages("Dataset", [dsId], None)
                 self.assertEquals(1, len(iList))
     self.assertTrue(datasetFound, "No dataset found with images from ROIs")
Example #16
def uploadBdbAsDataset(infile, dataset):
    
    """
    @param infile       path to bdb (absolute OR from where we are running) OR this can be a list of image paths. 
    @param dataset      Dataset to put images in (omero.model.DatasetI)
    """

    imageList = None
    # particleExt will be "ptcls" or "flip" or "wiener" if we are importing original particles
    # particleExt will be "data" or "flipped" or "filtered" if we are importing sets particles
    particleExt = None
    nimg = 0
    try:
        nimg = EMUtil.get_image_count(infile)    # eg images in bdb 'folder'
        particleExt = infile.split("_")[-1]
        print "Found %d %s images to import from: %s to dataset: %s" % (nimg, particleExt, infile, dataset.name.val)
    except:
        nimg = len(infile)    # OK, we're probably dealing with a list
        imageList = infile
        print "Importing %d images to dataset: %s" % (nimg, dataset.name.val)
    
    if nimg == 0:
        return
        
    d = EMData()
    # use first image to get data-type (assume all the same!)
    if imageList:
        d.read_image(imageList[0])
    else:
        d.read_image(infile, 0)
    plane2D = EMNumPy.em2numpy(d)
    pType = plane2D.dtype.name
    print pType
    pixelsType = queryService.findByQuery("from PixelsType as p where p.value='%s'" % pType, None) # omero::model::PixelsType
    
    if pixelsType == None and pType.startswith("float"):
        # try 'float'
        pixelsType = queryService.findByQuery("from PixelsType as p where p.value='%s'" % "float", None) # omero::model::PixelsType
    if pixelsType == None:
        print "Unknown pixels type for: " % pType
        return
    else:
        print "Using pixels type ", pixelsType.getValue().getValue()
    
    # identify the original metadata file with these values
    namespace = omero.constants.namespaces.NSCOMPANIONFILE 
    origFilePath = omero.constants.annotation.file.ORIGINALMETADATAPREFIX  #"/openmicroscopy.org/omero/image_files/"
    fileName = omero.constants.annotation.file.ORIGINALMETADATA
    
    # loop through all the images.
    nimg = min(50, nimg)    # limit the number of images imported
    for i in range(nimg):
        description = "Imported from EMAN2 bdb: %s" % infile
        newImageName = ""
        if imageList:
            h, newImageName = os.path.split(imageList[i])
            print "\nReading image: %s  (%s / %s)" % (imageList[i], i, nimg)
            d.read_image(imageList[i])
        else:
            newImageName = "%d" % i
            print "\nReading image: %s / %s" % (i, nimg)
            d.read_image(infile, i)
        plane2D = EMNumPy.em2numpy(d)
        #display(d)
        #plane2D *= 100     # temporary hack to avoid rendering problem with small numbers. 
        #planeMin = int(plane2D.min())
        #plane2D -= planeMin     # make min = 0
        #print plane2D
        plane2Dlist = [plane2D]        # single plane image
        
        # test attributes for source image link
        attributes = d.get_attr_dict()
        particleSource = ""
        if "ptcl_source_image" in attributes:
            parentName = attributes["ptcl_source_image"]
            newImageName = parentName   # name the particle after its parent
            description = description + "\nSource Image: %s" % parentName
            particleSource += parentName
            if parentName in newImageMap:
                #print "Add link to image named: ", parentName
                # simply add to description, since we don't have Image-Image links yet
                description = description + "\nSource Image ID: %s" % newImageMap[parentName]
        if "ptcl_source_coord" in attributes:
            try:
                x, y = attributes["ptcl_source_coord"]
                particleSource = "%s.%d.%d" % (particleSource, x, y)
                xCoord = float(x)
                yCoord = float(y)
                description = description + "\nSource Coordinates: %.1f, %.1f" % (xCoord, yCoord)
            except: pass
            
        # if we are importing the reference images for class averages, add link to original particle
        if particleExt != None and particleExt.endswith("all4"):
            particleid = "%s.%s" % (particleSource, "ptcls") # 'ptcls' links to original particles. 
            print "Adding link from all4 to original particle", particleid
            if particleid in newImageMap:
                description = description + "\nParticle Image ID: %s" % newImageMap[particleid]
        
        # if this particle has been imported already, simple put it in the dataset...
        if "data_path" in attributes:
            if particleExt in particleSetExtMap:    # E.g. "data" 
                originalParticleExt = particleSetExtMap[particleExt]    # E.g. "ptcls"
                particleSource += ".%s" % originalParticleExt
                if particleSource in newImageMap:
                    print particleSource, "already imported..."
                    particleId = newImageMap[particleSource]
            
                    link = omero.model.DatasetImageLinkI()
                    link.parent = omero.model.DatasetI(dataset.id.val, False)
                    link.child = omero.model.ImageI(particleId, False)
                    updateService.saveAndReturnObject(link)
                    continue
        
        # if we are dealing with a class average:
        if "class_ptcl_idxs" in attributes:
            particleIndexes = attributes["class_ptcl_idxs"]
            omeroIds = []
            for index in particleIndexes:
                if index in all4map:
                    omeroIds.append(all4map[index])
            ds = createDataset("class %s"%i, project=None, imageIds=omeroIds)
            description += "\nMember particles in Dataset ID: %s" % ds.id.val
            
        # create new Image from numpy data.
        print "Creating image in OMERO and uploading data..."
        image = scriptUtil.createNewImage(session, plane2Dlist, newImageName, description, dataset)
        imageId = image.getId().getValue()
        
        
        # if we know the pixel size, set it in the new image
        if "apix_x" in attributes:
            physicalSizeX = float(attributes["apix_x"])
            print "physicalSizeX" , physicalSizeX
            if "apix_y" in attributes:
                physicalSizeY = float(attributes["apix_y"])
                print "physicalSizeY" , physicalSizeY
            else:
                physicalSizeY = physicalSizeX
            pixels = image.getPrimaryPixels()
            pixels.setPhysicalSizeX(rdouble(physicalSizeX))
            pixels.setPhysicalSizeY(rdouble(physicalSizeY))
            updateService.saveObject(pixels)
             
        # make a map of name: imageId, for creating image links
        if particleExt != None and particleExt.endswith("all4"):
            all4map[i] = imageId
        elif particleSource:
            particleSource += ".%s" % particleExt
            print particleSource, "added to map"
            newImageMap[particleSource] = imageId
        else:
            print newImageName, "added to map"
            newImageMap[newImageName] = imageId
            
        
        f = open(fileName, 'w')        # will overwrite each time. 
        f.write("[GlobalMetadata]\n")
        
        # now add image attributes as "Original Metadata", sorted by key. 
        keyList = list(attributes.keys())    
        keyList.sort()
        for k in keyList:
            #print k, attributes[k]
            f.write("%s=%s\n" % (k, attributes[k]))
        f.close()
        
        filePath = "%s%s/%s" % (origFilePath, imageId, fileName)
        print "Uploading %s to Image: %s with path: %s" % (fileName, imageId, filePath)
        scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace, filePath)
    # delete temp file
    if os.path.exists(fileName):    os.remove(fileName)
def uploadBdbsAsDataset(services, bdbContainer, imageIds, project = None, info = None):
    
    """
    This method takes a folder that contains multiple bdbs, each representing different ctf output. 
    This is what is generated by the e2ctf.py command, when it is running on images not in a bdb. 
    Output images (in the form of bdbs) are placed in a 'particles' folder in the current directory.
    This method uploads all the images in the given directory into a new dataset per bdb.  
    
    @param bdbContainer     path to bdb. In this script, it is a relative path (folder name) E.g. 'particles'
    @param imageIds         The OMERO image-ids that these particles come from. Add to descriptions 
    @param project          if specified, put each dataset into this project (omero.model.ProjectI)
    @param info             Extra info to add to dataset description and image descriptions. 
    
    """
    
    re = services["renderingEngine"]
    queryService = services["queryService"]
    pixelsService = services["pixelsService"]
    rawPixelStore = services["rawPixelsStore"]
    updateService = services["updateService"]
    rawFileStore = services["rawFileStore"]

    # get the list of bdbs
    dbs = db_list_dicts('bdb:%s' % bdbContainer)
    
    print dbs
    if len(dbs) == 0:
        print "No bdb images to upload"
        return
    
    d = EMData()
    # use first image to get data-type (assume all the same!)
    dbpath = "bdb:particles#%s" % dbs[0]
    d.read_image(dbpath, 0)
    plane2D = EMNumPy.em2numpy(d)
    
    namespace = omero.constants.namespaces.NSCOMPANIONFILE 
    fileName = omero.constants.annotation.file.ORIGINALMETADATA
    
    # loop through all the images. 
    datasets = []
    for db in dbs:
        dbpath = "bdb:particles#%s" % db 
        nimg = EMUtil.get_image_count(dbpath)    # eg images in bdb 'folder'
        print "Found %d images to import from: %s" % (nimg, dbpath)
        
        # make a dataset for images
        dataset = omero.model.DatasetI()
        dataset.name = rstring(db)
        dataset.description = rstring(info)
        dataset = updateService.saveAndReturnObject(dataset)
        datasets.append(dataset)
        if project:        # and put it in a new project
            link = omero.model.ProjectDatasetLinkI()
            link.parent = omero.model.ProjectI(project.id.val, False)
            link.child = omero.model.DatasetI(dataset.id.val, False)
            updateService.saveAndReturnObject(link)
            
        for i in range(nimg):
            newImageName = str(db)
            print "Importing image: %d" % i
            description = "CTF-corrected image\n"
            if info:    description += " %s\n" % info
            description += " Original Image ID: %s" % imageIds[i]
            print "importing from:" , dbpath
            d.read_image(dbpath, i)
            plane2D = EMNumPy.em2numpy(d)
            #print plane2D
            plane2Dlist = [plane2D]        # single plane image
        
            image = scriptUtil.createNewImage(plane2Dlist, newImageName, description, dataset)
            attributes = d.get_attr_dict()
            # if we know the pixel size, set it in the new image
            if "apix_x" in attributes:
                physicalSizeX = float(attributes["apix_x"])
                print "physicalSizeX" , physicalSizeX
                if "apix_y" in attributes:
                    physicalSizeY = float(attributes["apix_y"])
                    print "physicalSizeY" , physicalSizeY
                else:
                    physicalSizeY = physicalSizeX
                pixels = image.getPrimaryPixels()
                pixels.setPhysicalSizeX(rdouble(physicalSizeX))
                pixels.setPhysicalSizeY(rdouble(physicalSizeY))
                updateService.saveObject(pixels)
            
            f = open(fileName, 'w')        # will overwrite each time. 
            f.write("[GlobalMetadata]\n")
        
            # now add image attributes as "Original Metadata", sorted by key. 
            keyList = list(attributes.keys())     
            keyList.sort()
            for k in keyList:
                #print k, attributes[k]
                f.write("%s=%s\n" % (k, attributes[k]))
                if k == "ptcl_source_image":
                    print "Add link to image named: ", attributes[k]
            f.close()
        
            scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, fileName, "text/plain", None, namespace)
        # delete temp file
        os.remove(fileName)
    return datasets
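A usage sketch (not from the original source) for uploadBdbsAsDataset() above, showing how its services map could be assembled from a session; the 'particles' folder follows the docstring's example, and the info string is a placeholder.

def example_upload_ctf_output(session, imageIds, project=None):
    # Assemble the services map expected by uploadBdbsAsDataset() above;
    # note it looks up "rawPixelsStore" (not "rawPixelStore").
    services = {
        "renderingEngine": session.createRenderingEngine(),
        "queryService": session.getQueryService(),
        "pixelsService": session.getPixelsService(),
        "rawPixelsStore": session.createRawPixelsStore(),
        "updateService": session.getUpdateService(),
        "rawFileStore": session.createRawFileStore(),
    }
    # 'particles' is the folder that e2ctf.py writes its output bdbs into,
    # as described in the docstring above; imageIds are the source images.
    return uploadBdbsAsDataset(services, "particles", imageIds, project=project,
                               info="Imported by example sketch")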
def roiFigure(session, commandArgs):    
    """
        This processes the script parameters, adding defaults if needed. 
        Then calls a method to make the figure, and finally uploads and attaches this to the primary image.
        
        @param: session        The OMERO session
        @param: commandArgs        Map of String:Object parameters for the script. 
                                Objects are not rtypes, since getValue() was called when the map was processed below. 
                                But, list and map objects may contain rtypes (need to call getValue())
        
        @return:     the id of the originalFileLink child. (ID object, not value) 
    """
    
    # create the services we're going to need. 
    metadataService = session.getMetadataService()
    queryService = session.getQueryService()
    updateService = session.getUpdateService()
    rawFileStore = session.createRawFileStore()
    containerService = session.getContainerService()
    
    log("ROI figure created by OMERO on %s" % date.today())
    log("")
    
    pixelIds = []
    imageIds = []
    imageLabels = []
    imageNames = {}
    omeroImage = None    # this is set as the first image, to link figure to

    # function for getting image labels.
    def getLabels(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]
        
    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in commandArgs:
        if commandArgs["Image_Labels"] == "Datasets":
            def getDatasets(name, tagsList, pdList):
                return [dataset for project, dataset in pdList]
            getLabels = getDatasets
        elif commandArgs["Image_Labels"] == "Tags":
            def getTags(name, tagsList, pdList):
                return tagsList
            getLabels = getTags
            
    # process the list of images. If imageIds is not set, script can't run. 
    log("Image details:")
    for idCount, imageId in enumerate(commandArgs["IDs"]):
        iId = long(imageId.getValue())
        image = containerService.getImages("Image", [iId], None)[0]
        if image == None:
            print "Image not found for ID:", iId
            continue
        imageIds.append(iId)
        if idCount == 0:
            omeroImage = image        # remember the first image to attach figure to
        pixelIds.append(image.getPrimaryPixels().getId().getValue())
        imageNames[iId] = image.getName().getValue()
    
    if len(imageIds) == 0:
        print "No image IDs specified."
        return
            
    pdMap = figUtil.getDatasetsProjectsFromImages(queryService, imageIds)    # a map of imageId : list of (project, dataset) names. 
    tagMap = figUtil.getTagsFromImages(metadataService, imageIds)
    # Build a legend entry for each image
    for iId in imageIds:
        name = imageNames[iId]
        imageDate = image.getAcquisitionDate().getValue()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]
        
        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s  ID: %d" % (name, iId))
        log("  Date: %s" % date.fromtimestamp(imageDate/1000))
        log("  Tags: %s" % tags)
        log("  Project/Datasets: %s" % pdString)
        
        imageLabels.append(getLabels(name, tagsList, pdList))
    
    # use the first image to define dimensions, channel colours etc. 
    pixelsId = pixelIds[0]
    pixels = queryService.get("Pixels", pixelsId)

    sizeX = pixels.getSizeX().getValue()
    sizeY = pixels.getSizeY().getValue()
    sizeZ = pixels.getSizeZ().getValue()
    sizeC = pixels.getSizeC().getValue()

    
    width = sizeX
    if "Width" in commandArgs:
        w = commandArgs["Width"]
        try:
            width = int(w)
        except:
            log("Invalid width: %s Using default value: %d" % (str(w), sizeX))
    
    height = sizeY
    if "Height" in commandArgs:
        h = commandArgs["Height"]
        try:
            height = int(h)
        except:
            log("Invalid height: %s Using default value" % (str(h), sizeY))
            
    log("Image dimensions for all panels (pixels): width: %d  height: %d" % (width, height))
        
                        
    mergedIndexes = []    # the channels in the combined image, 
    mergedColours = {}    
    if "Merged_Colours" in commandArgs:
        cColourMap = commandArgs["Merged_Colours"]
        for c in cColourMap:
            rgb = cColourMap[c].getValue()
            rgba = imgUtil.RGBIntToRGBA(rgb)
            mergedColours[int(c)] = rgba
            mergedIndexes.append(int(c))
        mergedIndexes.sort()
    # make sure we have some merged channels
    if len(mergedIndexes) == 0:
        mergedIndexes = range(sizeC)
    mergedIndexes.reverse()
    
    mergedNames = False
    if "Merged_Names" in commandArgs:
        mergedNames = commandArgs["Merged_Names"]
        
    # Make channel-names map. If argument wasn't specified, name by index
    channelNames = {}
    if "Channel_Names" in commandArgs:
        cNameMap = commandArgs["Channel_Names"]
        for c in range(sizeC):
            if str(c) in cNameMap:
                channelNames[c] = cNameMap[str(c)].getValue()
            else: 
                channelNames[c] = str(c)
    else:
        for c in range(sizeC):
            channelNames[c] = str(c)
    
    # Make split-indexes list. If argument wasn't specified, include them all. 
    splitIndexes = []
    if "Split_Indexes" in commandArgs:
        for index in commandArgs["Split_Indexes"]:
            splitIndexes.append(index.getValue())
    else:
        splitIndexes = range(sizeC)
            
    colourChannels = True
    if "Split_Panels_Grey" in commandArgs and commandArgs["Split_Panels_Grey"]:
        colourChannels = False
    
    algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in commandArgs:
        a = commandArgs["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = omero.constants.projection.ProjectionType.MEANINTENSITY
    
    stepping = 1
    if "Stepping" in commandArgs:
        s = commandArgs["Stepping"]
        if (0 < s < sizeZ):
            stepping = s
    
    scalebar = None
    if "Scalebar" in commandArgs:
        sb = commandArgs["Scalebar"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None
    
    overlayColour = (255,255,255)
    if "Overlay_Colour" in commandArgs:
        r,g,b,a = OVERLAY_COLOURS[commandArgs["Overlay_Colour"]]
        overlayColour = (r,g,b)
    
    roiZoom = None
    if "ROI_Zoom" in commandArgs:
        roiZoom = float(commandArgs["ROI_Zoom"])
        if roiZoom == 0:
            roiZoom = None
    
    roiLabel = "FigureROI"
    if "ROI_Label" in commandArgs:
        roiLabel = commandArgs["ROI_Label"]
        
    spacer = (width/50) + 2
    
    fig = getSplitView(session, imageIds, pixelIds, splitIndexes, channelNames, mergedNames, colourChannels, mergedIndexes, 
            mergedColours, width, height, imageLabels, spacer, algorithm, stepping, scalebar, overlayColour, roiZoom, roiLabel)
    
    if fig == None:        # e.g. No ROIs found
        return                                                
    #fig.show()        # bug-fixing only
    
    log("")
    figLegend = "\n".join(logStrings)
    
    #print figLegend    # bug fixing only
    
    format = JPEG
    if "Format" in commandArgs:
        if commandArgs["Format"] == "PNG":
            format = PNG
            
    output = "roiFigure"
    if "Figure_Name" in commandArgs:
        output = str(commandArgs["Figure_Name"])
        
    if format == PNG:
        output = output + ".png"
        fig.save(output, "PNG")
    else:
        output = output + ".jpg"
        fig.save(output)
    
    # Use util method to upload the figure 'output' to the server, attaching it to the omeroImage, adding the 
    # figLegend as the fileAnnotation description. 
    # Returns the id of the originalFileLink child. (ID object, not value)
    fileAnnotation = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, omeroImage, output, format, figLegend)
    return fileAnnotation
Example #19
def makeFrapFigure(session, commandArgs):
	"""
	Main method called to make the figure. 
	Returns fileID object of the child of the fileAnnotation
	"""
	roiService = session.getRoiService()
	queryService = session.getQueryService()
	updateService = session.getUpdateService()
	rawFileStore = session.createRawFileStore()
	rawPixelStore = session.createRawPixelsStore()
	renderingEngine = session.createRenderingEngine()
	
	imageId = commandArgs["imageId"]
	
	theC = 0
	if "theC" in commandArgs:
		theC = commandArgs["theC"]
	
	image = queryService.get("Image", imageId)
	imageName = image.getName().getValue()
	
	query_string = "select p from Pixels p join fetch p.image i join fetch p.pixelsType pt where i.id='%d'" % imageId
	pixels = queryService.findByQuery(query_string, None)
	
	#pixels = image.getPrimaryPixels()
	pixelsId = pixels.getId().getValue()
	
	
	#sizeX = pixels.getSizeX().getValue()
	#sizeY = pixels.getSizeY().getValue()
	#sizeZ = pixels.getSizeZ().getValue()
	#sizeC = pixels.getSizeC().getValue()
	#sizeT = pixels.getSizeT().getValue()
	
	bypassOriginalFile = True
	rawPixelStore.setPixelsId(pixelsId, bypassOriginalFile)

	roiLabels = ["FRAP", "Base", "Whole"]
	
	roiMap = getEllipses(roiService, imageId, roiLabels)
	
	for l in roiLabels:
		if l not in roiMap.keys():
			print "ROI: '%s' not found. Cannot calculate FRAP" % l
			return
			
			
	frapMap = roiMap["FRAP"]
	baseMap = roiMap["Base"]
	wholeMap = roiMap["Whole"]
	
	# make a list of the t indexes that have all 3 of the Shapes we need. 
	# and a list of the roiShapes for easy access.
	tIndexes = []
	frapROI = []
	baseROI = []
	wholeROI = []
	for t in frapMap.keys():
		if t in baseMap.keys() and t in wholeMap.keys():
			tIndexes.append(t)	
			frapROI.append(frapMap[t])	
			baseROI.append(baseMap[t])	
			wholeROI.append(wholeMap[t])
	tIndexes.sort()
	
	log("T Indexes, " + ",".join([str(t) for t in tIndexes]))
	
	# get the actual plane times. 
	timeMap = figUtil.getTimes(queryService, pixelsId, tIndexes, theZ=0, theC=0)
	timeList = []
	for t in tIndexes:
		if t in timeMap:	
			timeList.append(timeMap[t])
		else:	# handles images which don't have PlaneInfo
			timeMap[t] = t
			timeList.append(t)
			
	log("Plane times (secs), " + ",".join([str(t) for t in timeList]))
	
	# lists of averageIntensity for the 3 ROIs 
	frapValues = []
	baseValues = []
	wholeValues = []
	
	frapBleach = None
	
	theZ = 0
	for i, t in enumerate(tIndexes):
		shapes = [frapROI[i], baseROI[i], wholeROI[i]]
		theZ = frapROI[i][4]	# get theZ from the FRAP ROI
		# get a list of the average values of pixels in the three shapes. 
		averages = analyseEllipses(shapes, pixels, rawPixelStore, theC, t, theZ)
		if frapBleach == None:	
			frapBleach = averages[0]
		else:
			frapBleach = min(frapBleach, averages[0])
		frapValues.append(averages[0])
		baseValues.append(averages[1])
		wholeValues.append(averages[2])

	log("FRAP Values, " + ",".join([str(v) for v in frapValues]))
	log("Base Values, " + ",".join([str(v) for v in baseValues]))
	log("Whole Values, " + ",".join([str(v) for v in wholeValues]))
	
	# find the time of the bleach event (lowest intensity )
	tBleach = frapValues.index(frapBleach)
	log("Pre-bleach frames, %d" % tBleach)
	if tBleach == 0:
		print "No pre-bleach images. Can't calculate FRAP"
		return
		
	# using frames before and after tBleach - calculate bleach ranges etc. 
	frapPre = average(frapValues[:tBleach]) - average(baseValues[:tBleach])
	wholePre = average(wholeValues[:tBleach]) - average(baseValues[:tBleach])
	wholePost = average(wholeValues[tBleach:]) - average(baseValues[tBleach:])

	# use these values to get a ratio of FRAP intensity / pre-Bleach intensity * (corrected by intensity of 'Whole' ROI)
	frapNormCorr = []
	for i in range(len(tIndexes)):
		frapNormCorr.append( (float(frapValues[i] - baseValues[i]) / frapPre) * (wholePre / float(wholeValues[i] - baseValues[i])) )
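	# The correction above can be read as (a restatement, not new logic):
	#   frapNormCorr[i] = ((FRAP[i] - Base[i]) / frapPre) * (wholePre / (Whole[i] - Base[i]))
	# i.e. background-subtracted FRAP intensity, normalised to its pre-bleach level and
	# scaled by the whole-cell signal to compensate for bleaching during acquisition.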
	
	log("FRAP Corrected, " + ",".join([str(v) for v in frapNormCorr]))
	
	# work out the range of recovery (bleach -> plateau) and the time to reach half of this after bleach. 
	frapBleachNormCorr = frapNormCorr[tBleach]
	plateauNormCorr = average(frapNormCorr[-5:])
	plateauMinusBleachNormCorr = plateauNormCorr - frapBleachNormCorr
	mobileFraction = plateauMinusBleachNormCorr / float(1 - frapBleachNormCorr)
	immobileFraction = 1 - mobileFraction
	halfMaxNormCorr = plateauMinusBleachNormCorr /2 + frapBleachNormCorr
	
	log("Corrected Bleach Intensity, %f" % frapBleachNormCorr)
	log("Corrected Plateau Intensity, %f" % plateauNormCorr)
	log("Plateau - Bleach, %f" % plateauMinusBleachNormCorr)
	log("Mobile Fraction, %f" % mobileFraction)
	log("Immobile Fraction, %f" % immobileFraction)
	log("Half Recovered Intensity, %f" % halfMaxNormCorr)

	# Define the T-half for this FRAP. In place of fitting an exact curve to the
	# data, find the two time-points that the half Max of recovery sits between
	# and find the T-half using a linear approximation between these two points.
	# The T-half is this solved for halfMaxNormCorr - timestamp(tBleach)
	# Find the first post-bleach timepoint where the corrected intensity passes the half-max.
	# Iterate over list positions so frapNormCorr and timeList stay in step.
	th = None
	for i in range(tBleach, len(tIndexes)):
		if halfMaxNormCorr < frapNormCorr[i]:
			th = i
			break
	if th is None:
		print "Recovery never reached half-max. Can't calculate T-Half"
		return
	
	y1 = frapNormCorr[th-1]
	y2 = frapNormCorr[th]
	x1 = timeList[th-1]
	x2 = timeList[th]
	m1 = (y2-y1)/(x2-x1)    # gradient of the line
	c1 = y1-m1*x1           # y-intercept
	tHalf = (halfMaxNormCorr-c1)/m1 - timeList[tBleach]
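	# Equivalently (sketch): with (x1, y1) and (x2, y2) bracketing the half-max, the line
	# y = m1*x + c1 crosses halfMaxNormCorr at x = (halfMaxNormCorr - c1) / m1, and
	# tHalf is that crossing time minus the bleach time, timeList[tBleach].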
	
	log("Bleach time, %f seconds" % timeList[tBleach])
	log("T-Half, %f seconds" % tHalf)
	
	figLegend = "\n".join(logLines)
	print figLegend
	
	# make PIL image of the last frame before FRAP
	spacer = 5
	frames = []
	ellipses = [frapROI[tBleach-1], frapROI[tBleach], frapROI[-1]]
	frames.append(getPlaneImage(renderingEngine, pixelsId, theZ, tIndexes[tBleach-1]))
	frames.append(getPlaneImage(renderingEngine, pixelsId, theZ, tIndexes[tBleach]))
	frames.append(getPlaneImage(renderingEngine, pixelsId, theZ, tIndexes[-1]))
	figW = 450
	font = imgUtil.getFont(16)
	fontH = font.getsize("FRAP")[1]
	labels = ["Pre-Bleach", "Bleach", "Recovery"]
	imgW = (figW - (2 * spacer) ) / len(frames)
	# shrink the images by width, or maintain height if shrink not needed. 
	smallImages = [imgUtil.resizeImage(img, imgW, img.size[1]) for img in frames]
	zoomOut = 1/imgUtil.getZoomFactor(frames[0].size, imgW, frames[0].size[1])	# avoid relying on 'img' leaking out of the list comprehension above
	figH = smallImages[0].size[1] + spacer + fontH 
	frapCanvas = Image.new("RGB", (figW, figH), (255,255,255))
	draw = ImageDraw.Draw(frapCanvas)
	y = spacer + fontH
	x = 0
	for l, img in enumerate(frames):
		label = labels[l]
		indent = (imgW - font.getsize(label)[0]) / 2
		draw.text((x+indent, 0), label, font=font, fill=(0,0,0))
		roiImage = addEllipse(smallImages[l], ellipses[l], zoomOut)
		imgUtil.pasteImage(roiImage, frapCanvas, x, y)
		x += spacer + imgW
	#frapCanvas.show()		# bug-fixing only
	fileName = imageName + ".png"
	frapCanvas.save(fileName, "PNG")
	
	format = PNG
	output = fileName
	
	# if reportLab has imported...
	if reportLab:
		# we are going to export a PDF, not a JPEG
		format = PDF
		output = imageName + ".pdf"
		
		# create a plot of a curve fitted to:  y = 1 - e^(i*t),  with i = ln(0.5) / tHalf
		# (see http://www.embl.de/eamnet/frap/html/halftime.html)
		import math
		i = 1/float(tHalf) * math.log(0.5)
		fittedPoints = []
		for t in timeList[3:]:
			#print math.exp(t * i)		# bug-fixing only
			f = frapBleachNormCorr + ((plateauNormCorr-frapBleachNormCorr) * (1 - math.exp(t * i)))
			fittedPoints.append(f)
		#print fittedPoints		# bug-fixing only
		log("Fitted: , " + str(fittedPoints))
		
		# create a plot of the FRAP data
		figHeight = 450
		figWidth = 400
		drawing = Drawing(figWidth, figHeight)
		lp = LinePlot()
		lp.x = 50
		lp.y = 50
		lp.height = 300
		lp.width = 300
		lp.data = [zip(timeList, frapNormCorr), zip(timeList[3:], fittedPoints)]
		lp.lines[0].strokeColor = colors.red
		lp.lines[0].symbol = makeMarker('Circle')
		lp.lines[1].strokeColor = colors.green
		lp.lines[1].symbol = makeMarker('Circle')
		
		drawing.add(lp)
	
		drawing.add(String(200,25, 'Time (seconds)', fontSize=12, textAnchor="middle"))
		drawing.add(String(200,figHeight-25, imageName, fontSize=12, textAnchor="middle"))
		drawing.add(String(200,figHeight-50, 'T(1/2) = %f' % tHalf, fontSize=12, textAnchor="middle"))
	
		# create an A4 canvas to make the pdf figure 
		figCanvas = canvas.Canvas(output, pagesize=A4)
		pasteX = 100
		pasteY = 75
		# add the FRAP image
		figCanvas.drawImage(fileName, pasteX-25, pasteY)
		# add the FRAP data plot
		renderPDF.draw(drawing, figCanvas, pasteX, 300, showBoundary=True)
		figCanvas.save()
	
	fileId = scriptUtil.uploadAndAttachFile(queryService, updateService, rawFileStore, image, output, format, figLegend)
	return fileId
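
# A minimal driver sketch (not part of the original example) showing how makeFrapFigure
# might be invoked against a live server. It assumes the module-level 'import omero' used
# elsewhere in this script; the host, port, credentials, image ID and channel below are
# placeholders, and the image is assumed to carry "FRAP", "Base" and "Whole" ellipse ROIs.
if __name__ == "__main__":
	client = omero.client("localhost", 4064)                   # placeholder host/port
	session = client.createSession("username", "password")     # placeholder credentials
	try:
		commandArgs = {"imageId": 123, "theC": 0}               # placeholder image ID / channel
		fileId = makeFrapFigure(session, commandArgs)
		if fileId is not None:
			print "FRAP figure attached. File ID object: %s" % fileId
	finally:
		client.closeSession()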