def runSpf(session, parameterMap):
    """
    This is where the action happens.
    We get the parameters, download the images, run Spider file from command line. 
    Then we get the output from the folder where the results should be, and upload these
    images into a new dataset. 
    """
    
    # create services we need 
    services = {}
    services["renderingEngine"] = session.createRenderingEngine()
    services["queryService"] = session.getQueryService()
    services["pixelsService"] = session.getPixelsService()
    services["rawPixelStore"] = session.createRawPixelsStore()
    services["updateService"] = session.getUpdateService()
    services["rawFileStore"] = session.createRawFileStore()
    containerService = session.getContainerService()
    
    queryService = services["queryService"]
    rawFileStore = services["rawFileStore"]
    rawPixelStore = services["rawPixelStore"]
    updateService = services["updateService"]
    
    imageIds = []
    imageNames = {}     # map of id:name
    
    dataType = parameterMap["Data_Type"]
    if dataType == "Image":
        for imageId in parameterMap["IDs"]:
            iId = long(imageId.getValue())
            imageIds.append(iId)
    else:   # Dataset
        # collect the dataset IDs, then simply aggregate all images from those datasets
        datasetIds = []
        for datasetId in parameterMap["IDs"]:
            try:
                datasetIds.append(long(datasetId.getValue()))
            except Exception:
                pass
        images = containerService.getImages("Dataset", datasetIds, None)
        for i in images:
            iId = i.getId().getValue()
            imageIds.append(iId)
            imageNames[iId] = i.name.val
            
    if len(imageIds) == 0:
        return
        
    # get the project from the first image
    project = None
    dataset = None
    imageId = imageIds[0]
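    # HQL query: load the image together with its parent dataset(s) and
    # project(s), so new results can be placed alongside the original image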
    query_string = "select i from Image i join fetch i.datasetLinks idl join fetch idl.parent d join fetch d.projectLinks pl join fetch pl.parent where i.id in (%s)" % imageId
    image = queryService.findByQuery(query_string, None)
    if image:
        for link in image.iterateDatasetLinks():
            dataset = link.parent
            print "Dataset", dataset.name.val
            for dpLink in dataset.iterateProjectLinks():
                project = dpLink.parent
                print "Project", project.name.val
                break # only use 1st Project
            break    # only use 1st Dataset
    
    if "New_Dataset_Name" in parameterMap:
        # make a dataset for images
        dataset = omero.model.DatasetI()
        dataset.name = rstring(parameterMap["New_Dataset_Name"])
        from datetime import datetime
        dt = datetime.now()
        dataset.description = rstring("Images generated by SPIDER procedure on %s" % dt.strftime("%A, %d. %B %Y %I:%M%p"))
        dataset = updateService.saveAndReturnObject(dataset)
        if project:        # and put it in the same project
            link = omero.model.ProjectDatasetLinkI()
            link.parent = omero.model.ProjectI(project.id.val, False)
            link.child = omero.model.DatasetI(dataset.id.val, False)
            updateService.saveAndReturnObject(link)
    
    
    
    fileExt = "dat"
        
    inputName = "input"
    if "Input_Name" in parameterMap:
        inputName = parameterMap["Input_Name"]
    outputName = "output"
    if "Output_Name" in parameterMap:
        outputName = parameterMap["Output_Name"]
            
    # get the procedure file
    spfName = "procedure.spf"
    spf = parameterMap["Spf"]
    spfText = None
    try:
        # either the user specified the SPF as a file annotation ID....
        spfFileId = long(spf)   
        annotation = queryService.get('FileAnnotation', spfFileId)
        origFileId = annotation.file.id.val
        originalFile = queryService.findByQuery("from OriginalFile as o where o.id = %s" % origFileId, None)
        scriptUtil.downloadFile(rawFileStore, originalFile, filePath=spfName)
    except:
        # ...or they specified a SPIDER command and args separated by ';', e.g. "WI; 75,75 ; 1,75"
        spfCommands = [cmd.strip() for cmd in spf.split(";")]
        spfCommands.insert(1, inputName)
        spfCommands.insert(2, outputName)
        spfText = "\n".join(spfCommands)
        spfFile = open(spfName, "w")
        spfFile.write(spfText) 
        spfFile.close()
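        # For the example above ("WI; 75,75 ; 1,75") the generated procedure.spf
        # would contain (with the input/output names inserted as lines 2 and 3):
        #   WI
        #   input
        #   output
        #   75,75
        #   1,75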

    # build the SPIDER command line, e.g. "spider spf/dat @procedure"
    spfCommand = "spider spf/%s @procedure" % fileExt
    
    spfText = open(spfName, 'r').read()
    print spfText
    # for each image, download it, run the spider command and upload result to OMERO
    inputImage = "%s.%s" % (inputName, fileExt)
    outputImage = "%s.%s" % (outputName, fileExt)
    pixelsType = None   # set by first image result - assume all the same
    newImages = []
    for i, imageId in enumerate(imageIds):
        downloadImage(queryService, rawPixelStore, imageId, inputImage)
        #print "Image downloaded to ", inputImage, os.path.exists(inputImage)
        #print spfCommand
        # Call SPIDER by command line...
        os.system(spfCommand)
        #print "Output image exists ", os.path.exists(outputImage)
        if not os.path.exists(outputImage):
            print "Image not created by SPIDER from Input Image ID: %s" % imageId
            continue
        if pixelsType is None:      pixelsType = getPixelsType(queryService, outputImage)
        name = None
        if imageId in imageNames:   name = imageNames[imageId]
        description = "Created from Image ID: %s with the Spider Procedure\n%s" % (imageId, spfText)
        image = uploadImageToDataset(session, services, pixelsType, outputImage, dataset, description, name)
        newImages.append(image)
        # attach Spf to new image (not so important, since we add the text to image description)
        # This creates a new FileAnnotationI for each image. Really we want a single FileAnnotation linked to all images.
        #scriptUtil.attachFileToParent(services["updateService"], image, originalFile)
    
    return (dataset, newImages)
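
# A minimal usage sketch (an assumption, not part of the original example):
# it shows the shape of the parameterMap that runSpf() reads above. A real
# OMERO script would normally receive these values via omero.scripts.client();
# the IDs and SPF command below are placeholders.
def exampleRunSpf(session):
    from omero.rtypes import rlong
    parameterMap = {
        "Data_Type": "Dataset",
        "IDs": [rlong(1)],                      # dataset (or image) IDs, rtype-wrapped
        "Spf": "WI; 75,75 ; 1,75",              # SPIDER command form, or a FileAnnotation ID
        "New_Dataset_Name": "SPIDER results",   # optional: create a new dataset for outputs
        "Input_Name": "input",                  # optional: name for the SPIDER input file
        "Output_Name": "output",                # optional: name for the SPIDER output file
    }
    return runSpf(session, parameterMap)
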
def run(commandArgs):

    # login details
    host = commandArgs["host"]
    user = commandArgs["username"]
    password = commandArgs["password"]

    client = omero.client(host)
    session = client.createSession(user, password)

    # create the services we need
    scriptService = session.getScriptService()
    rawFileStore = session.createRawFileStore()
    queryService = session.getQueryService()
    updateService = session.getUpdateService()
    containerService = session.getContainerService()

    ids = []

    if "image" in commandArgs:
        iId = long(commandArgs["image"])
        ids.append(iId)
    elif "dataset" in commandArgs:
        dIds = [long(commandArgs["dataset"])]
        images = containerService.getImages("Dataset", dIds, None)
        for i in images:
            ids.append(i.getId().getValue())
    else:
        print "No image or dataset ID given"
        return

    if len(ids) == 0:
        print "No images found"
        return

    # get the most recent (highest ID) original file with the correct script name
    scriptName = "Save_Image_As_Em.py"
    scriptId = -1
    for s in scriptService.getScripts():
        if s.getName().getValue() == scriptName:
            scriptId = max(scriptId, s.getId().getValue())

    print "Running script %s with ID: %s" % (scriptName, scriptId)

    imageIds = omero.rtypes.rlist([omero.rtypes.rlong(iId) for iId in ids])

    scriptParams = {
        "Image_IDs": imageIds,
    }

    if "extension" in commandArgs:
        scriptParams["Extension"] = omero.rtypes.rstring(commandArgs["extension"])

    results = None

    proc = scriptService.runScript(scriptId, scriptParams, None)
    try:
        cb = omero.scripts.ProcessCallbackI(client, proc)
        while not cb.block(1000):  # ms.
            pass
        cb.close()
        results = proc.getResults(0)  # ms
    finally:
        proc.close(False)

    path = None
    if "path" in commandArgs:
        path = commandArgs["path"]

    if "Original_Files" in results:
        for r in results["Original_Files"].getValue():
            # download the file from OMERO
            f = r.getValue()  # unloaded originalfile
            fileId = f.getId().getValue()
            originalFile = queryService.findByQuery(
                "from OriginalFile as o where o.id = %s" % fileId, None)
            name = originalFile.getName().getValue()
            if path:
                name = os.path.join(path, name)
            filePath = scriptUtil.downloadFile(rawFileStore, originalFile,
                                               name)
            print "Saved file at:", filePath
            # This only deletes the DB row, not the data on disk! utils.cleanse.py removes files that are not in db.
            updateService.deleteObject(originalFile)
    else:
        print "No files generated by %s script on the server" % scriptName
def run(commandArgs):
    
    # login details
    host = commandArgs["host"]
    user = commandArgs["username"]
    password = commandArgs["password"]
    
    client = omero.client(host)
    session = client.createSession(user, password)
    
    # create the services we need
    scriptService = session.getScriptService()
    rawFileStore = session.createRawFileStore()
    queryService = session.getQueryService()
    re = session.createRenderingEngine()
    updateService = session.getUpdateService()
    containerService = session.getContainerService()   # needed below to load the image pixels
    
    imageId = None
    
    if "image" in commandArgs:
        imageId = long(commandArgs["image"])
    else:
        print "No image ID given"
        return
    
    # look up the 'official' script by its path on the server
    scriptName = "/EMAN2/Save_Image_As_Em.py"
    scriptId = scriptService.getScriptID(scriptName)
    
    print "Running %s with script ID: %s" % (scriptName, scriptId)
    imageIds = omero.rtypes.rlist([omero.rtypes.rlong(imageId)])
    
    scriptParams = {
        "Image_IDs": imageIds,
        "Extension": omero.rtypes.rstring("mrc")    # export as mrc file
    }

    results = None

    proc = scriptService.runScript(scriptId, scriptParams, None)
    try:
        cb = omero.scripts.ProcessCallbackI(client, proc)
        while not cb.block(1000): # ms.
            pass
        cb.close()
        results = proc.getResults(0)    # ms
    finally:
        proc.close(False)
        
    
    path = None
    if "path" in commandArgs:
        path = commandArgs["path"]
    
    
    fileNames = []    # need names for passing to chimera
    if "Original_Files" in results:
        for r in results["Original_Files"].getValue():
            # download the file from OMERO 
            f = r.getValue()
            fileId = f.getId().getValue()
            print "Downloading Original File ID:", fileId
            originalFile = queryService.findByQuery("from OriginalFile as o where o.id = %s" % fileId, None)
            name = originalFile.getName().getValue()
            if path:
                name = os.path.join(path, name)
            filePath = scriptUtil.downloadFile(rawFileStore, originalFile, name)
            print "   file saved to:", filePath
            fileNames.append(filePath)        # if 'name' file already exists, filePath will be different 
            # This only deletes the DB row, not the data on disk! utils.cleanse.py removes files that are not in db. 
            updateService.deleteObject(originalFile)
    else:
        print "No OriginalFileIds returned by script"
        if 'stdout' in results:
            origFile = results['stdout'].getValue()
            fileId = origFile.getId().getValue()
            print "\n******** Script: %s generated StdOut in file:%s  *******" % (scriptName, fileId)
            print scriptUtil.readFromOriginalFile(rawFileStore, queryService, fileId)
        if 'stderr' in results:
            origFile = results['stderr'].getValue()
            fileId = origFile.getId().getValue()
            print "\n******** Script: %s generated StdErr in file:%s  *******" % (scriptName, fileId)
            print scriptUtil.readFromOriginalFile(rawFileStore, queryService, fileId)
        return
        

    # need to get colours for each channel, to pass to chimera. Chimera uses [(0.0, 0.0, 1.0, 1.0),(0.0, 1.0, 0.0, 1.0)]
    # This returns e.g. [[0, 0, 255, 255], [0, 255, 0, 255], [255, 0, 0, 255], [255, 0, 0, 255]] but seems to work OK. 
    pixels = containerService.getImages('Image', [imageId], None)[0].getPrimaryPixels()
    pixelsId = pixels.getId().getValue()
    sizeC = pixels.getSizeC().getValue()
    colours = getColours(re, sizeC, pixelsId)
    
    # now we need to make a Chimera script to open the images (channels) and colour them! 
    scriptLines = []
    scriptLines.append("from chimera import openModels")
    scriptLines.append("from VolumeViewer import Volume")
    for fn in fileNames:
        scriptLines.append("openModels.open('%s')" % fn)
    scriptLines.append("colors = %s" % colours)        # the colours list is rendered suitably as a string 
    scriptLines.append("for c, v in enumerate(openModels.list(modelTypes=[Volume])):")
    scriptLines.append("    v.set_parameters(surface_colors = [colors[c]])")
    scriptLines.append("    v.show()")
    
    print "\n".join(scriptLines)
    
    scriptName = "colourMaps.py"
    f = open(scriptName, 'w')        # will overwrite each time. 
    for line in scriptLines:
        f.write(line)
        f.write("\n")
    f.close()
    
    command = "chimera --script %s" % scriptName
    print command
    os.system(command)
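
# Hypothetical entry point (an assumption; the original example does not show
# one). It sketches how the commandArgs for the Chimera variant of run() above
# might be assembled from the command line; the positional arguments are
# placeholders.
if __name__ == "__main__":
    import sys
    commandArgs = {
        "host": sys.argv[1],
        "username": sys.argv[2],
        "password": sys.argv[3],
        "image": sys.argv[4],       # ID of the image to export and view
    }
    if len(sys.argv) > 5:
        commandArgs["path"] = sys.argv[5]   # optional download directory
    run(commandArgs)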