def process_data(conn, image, rectangles, coords):
    """ Get the coordinates in each roi, write to a file and append to dataset

    @param conn: BlitzGateway connection used to create file annotations
    @param image: image the csv files are attached to
    @param rectangles: iterable of ROI rectangles (passed to
        get_coords_in_roi)
    @param coords: 3D array of localisations; axis 0 indexes the channel
    @return: status message accumulated from the annotation helper
    """
    message = ""
    for i, rect in enumerate(rectangles):
        file_name = "coords_in_roi_%s.csv" % i
        # 'with' guarantees the handle is closed; the original opened the
        # file inside try and the finally clause raised NameError when
        # open() itself failed
        with open(file_name, 'w') as f:
            # write the coordinates from EVERY channel -- the original
            # collected locs for each channel in a list but then wrote
            # only the last channel's rows
            for c in range(coords.shape[0]):
                locs = get_coords_in_roi(coords[c, :, :], rect)
                for r in range(locs.shape[0]):
                    row = locs[r, :]
                    f.write(','.join([str(v) for v in row]) + '\n')
        new_file_ann, faMessage = script_util.createLinkFileAnnotation(
            conn, file_name, image, output="wrote coords file",
            mimetype="text/csv", desc=None)
        message += faMessage
    return message
def saveImages(conn, scriptParams): dataType = scriptParams['Data_Type'] formatType = scriptParams['Format'] dirpath='tmp' #Use Root tmp dir so gets cleaned up unique_folder = 'Exportfiles' + str(time.time()) #random imagepath = path.join(dirpath, unique_folder) imagepath = path.sep + imagepath + path.sep print "Imagepath=", imagepath # Get the images or datasets message = "" objects, logMessage = script_utils.getObjects(conn, scriptParams) message += logMessage parent = objects[0] #?? if not objects: return None, message if dataType == 'Dataset': images = [] for ds in objects: images.extend(list(ds.listChildren())) if not images: message += "No image found in dataset(s)" return None, message else: images = objects imageIds = [i.getId() for i in images] print "Selected %d images for processing" % len(imageIds) #Create download folder if len(imageIds) > 0: mkdir(imagepath) #chdir(imagepath) imagefilenames =[] for iId in imageIds: img = conn.getObject("Image", iId) if img is not None: z = img.getSizeZ() / 2 t = 0 saveimage = img.renderImage(z,t) # returns PIL Image jpeg imageFilename = getImageName(img, formatType.lower(), imagepath) imagefilenames.append(imageFilename) message += saveAs(saveimage,imageFilename,formatType) #zip output files zipfilename = path.join(dirpath, unique_folder) zipfilename = path.sep + zipfilename + ".zip" message += createZipFile(zipfilename, imagefilenames) mimetype = 'application/zip' outputDisplayName = "Image export zip" namespace = NSCREATED + "/QBI/Utils/ExportImage" fileAnnotation, annMessage = script_utils.createLinkFileAnnotation( conn, zipfilename, parent, output=outputDisplayName, ns=namespace, mimetype=mimetype) message += annMessage return fileAnnotation, message
def process_data(conn, image, file_type, rectangles, localisations, scalex, scaley, cIndex=0): """ Get the coordinates in the ROI and form histograms of x and y coordinates @param conn: the BlitzGateWay connection @param image: the image containing the ROIs to process. the image were the localisations file is attached. @param file_type: what type of dataset are we dealing with (see FILE_TYPES global) @param rectangles: the ROIs scaled to nm @param coords: the xy coordinates of localisations being histogrammed @param scalex: multiplier to pixel size to determine bin size in histograms @param scaley: multiplier to pixel size to determine bin size in histograms """ message = "" pixels = image.getPrimaryPixels() # note pixel sizes (if available) to set for the new images physX = pixels.getPhysicalSizeX() * 1000.0 physY = pixels.getPhysicalSizeY() * 1000.0 x = file_type["x_col"] y = file_type["y_col"] for i, rect in enumerate(rectangles): binsx = ceil((rect[2] / physX) / scalex) binsy = ceil((rect[3] / physY) / scaley) print "binsx,binsy:", binsx, binsy rangex = rect[2] rangey = rect[3] print "rangex,rangey:", rangex, rangey locs_df = get_coords_in_roi(localisations[cIndex], rect, file_type) histx, edgesx = np.histogram(locs_df.loc[:, [x]].values, bins=binsx) histy, edgesy = np.histogram(locs_df.loc[:, [y]].values, bins=binsy) hist_dataX = np.zeros((histx.shape[0], 1)) hist_dataX[:, 0] = histx hist_dataY = np.zeros((histy.shape[0], 1)) hist_dataY[:, 0] = histy centersx = 0.5 * (edgesx[1:] + edgesx[:-1]) centersy = 0.5 * (edgesy[1:] + edgesy[:-1]) centers_dataX = np.zeros((centersx.shape[0], 1)) centers_dataX[:, 0] = centersx centers_dataY = np.zeros((centersy.shape[0], 1)) centers_dataY[:, 0] = centersy file_name = "scatter_hist_in_roi_%s.csv" % i with file(file_name, "w") as outfile: outfile.write("# scatter histogram data in x-direction\n") datax = np.concatenate((centers_dataX, hist_dataX), axis=1) np.savetxt(outfile, datax, fmt="%-7.2f", delimiter=",", newline="\n") 
outfile.write("# scatter histogram data in y-direction\n") datay = np.concatenate((centers_dataY, hist_dataY), axis=1) np.savetxt(outfile, datay, fmt="%-7.2f", delimiter=",", newline="\n") new_file_ann, faMessage = script_util.createLinkFileAnnotation( conn, file_name, image, output="wrote coords file for ROI %s" % i, mimetype="text/csv", desc=None ) message += faMessage return message
def process_data(conn, image, file_type, rectangles, coords):
    """ Get the coordinates in each roi, write to a file and append to dataset

    @param conn: BlitzGateway connection for creating file annotations
    @param image: image whose ROIs and description are being processed
    @param file_type: key into the FILE_TYPES global
    @param rectangles: ROI rectangles; the last element of each is the ROI id
    @param coords: list of localisation dataframes, one per channel
    @return: status message accumulated from the annotation helper
    """
    message = ""
    sizeT = image.getSizeT()
    frame = FILE_TYPES[file_type]['frame']
    # Default: a single time window covering every frame. Note max() is
    # CALLED here -- the original stored the bound method object, so the
    # frame-range filter below compared against a method, not a number.
    starts = [1]
    stops = [coords[0][frame].max()]
    if sizeT > 1:
        # Per-timepoint frame ranges are encoded in the image description
        # as 'Start [a,b,...]' / 'Stop [c,d,...]'
        desc = image.getDescription()
        if desc:
            start = desc.index('Start')
            stop = desc.index('Stop')
            starts = desc[start + 7:stop - 3]
            starts = [int(s) for s in starts.split(',')]
            stops = desc[stop + 6:len(desc) - 1]
            stops = [int(s) for s in stops.split(',')]

    def coord_gen():
        # Yields one filtered dataframe per (ROI, channel, timepoint),
        # in the same order the writer loop below consumes them
        for rect in rectangles:
            for c in range(len(coords)):
                locs_df = coords[c]
                for t in range(sizeT):
                    coords_in_frames = locs_df[
                        (locs_df[frame] >= starts[t])
                        & (locs_df[frame] <= stops[t])]
                    yield get_coords_in_roi(coords_in_frames, rect[:-1],
                                            file_type)

    coord_generator = coord_gen()
    for rect in rectangles:
        rid = rect[-1]
        for c in range(len(coords)):
            for t in range(sizeT):
                file_name = "Coords_ROI%s_Time%s_Channel%s.csv" % (rid, t, c)
                with open(file_name, 'w') as f:
                    locs_df = coord_generator.next()
                    locs_df.to_csv(f, sep=',', float_format='%8.2f',
                                   index=False, encoding='utf-8')
                new_file_ann, faMessage = script_util.createLinkFileAnnotation(
                    conn, file_name, image,
                    output="wrote coords file for ROI %s" % rid,
                    mimetype="text/csv", desc=None)
                message += faMessage
    return message
def attach_results(conn, ann, image, data, sizeC, sizeR):
    """Write the Ripley L dataframe to csv and attach it to the image.

    @param conn: BlitzGateway connection
    @param ann: source file annotation; its filename seeds the csv name
    @param image: image the csv is linked to
    @param data: dataframe of Ripley L results (written via to_csv)
    @param sizeC: number of channels analysed (header comment only)
    @param sizeR: number of ROIs analysed (header comment only)
    @return: the newly created file annotation
    """
    file_name = "ripleyl_plot_" + ann.getFile().getName()[:-4] + '.csv'
    # open() rather than the deprecated Python 2 file() builtin
    with open(file_name, 'w') as outfile:
        outfile.write('# ripley data for %s channels and %s ROIs: \n'
                      % (sizeC, sizeR))
        data.to_csv(outfile, sep=',', float_format='%8.2f', index=False,
                    encoding='utf-8')
    description = "Ripley L function data created from:\n" \
        " Image ID: %d Annotation_ID: %d" % (image.getId(), ann.getId())
    new_file_ann, faMessage = script_util.createLinkFileAnnotation(
        conn, file_name, image, output="Ripley L Plot csv (Excel) file",
        mimetype="text/csv", desc=description)
    return new_file_ann
def process_data(conn, image, file_type, rectangles, coords):
    """ Get the coordinates in each roi, write to a file and append to dataset

    @param conn: BlitzGateway connection for creating file annotations
    @param image: image whose ROIs and description are being processed
    @param file_type: key into the FILE_TYPES global
    @param rectangles: ROI rectangles; the last element of each is the ROI id
    @param coords: list of localisation dataframes, one per channel
    @return: status message accumulated from the annotation helper
    """
    message = ""
    sizeT = image.getSizeT()
    frame = FILE_TYPES[file_type]['frame']
    # Default: one time window spanning all frames. max() must be CALLED;
    # the original stored the bound method, making the filter below wrong.
    starts = [1]
    stops = [coords[0][frame].max()]
    if sizeT > 1:
        # Per-timepoint frame ranges live in the image description as
        # 'Start [a,b,...]' / 'Stop [c,d,...]'
        desc = image.getDescription()
        if desc:
            start = desc.index('Start')
            stop = desc.index('Stop')
            starts = desc[start + 7:stop - 3]
            starts = [int(s) for s in starts.split(',')]
            stops = desc[stop + 6:len(desc) - 1]
            stops = [int(s) for s in stops.split(',')]

    def coord_gen():
        # One filtered dataframe per (ROI, channel, timepoint), matching
        # the consumption order of the writer loop below
        for rect in rectangles:
            for c in range(len(coords)):
                locs_df = coords[c]
                for t in range(sizeT):
                    coords_in_frames = locs_df[
                        (locs_df[frame] >= starts[t])
                        & (locs_df[frame] <= stops[t])]
                    yield get_coords_in_roi(coords_in_frames, rect[:-1],
                                            file_type)

    coord_generator = coord_gen()
    for rect in rectangles:
        rid = rect[-1]
        for c in range(len(coords)):
            for t in range(sizeT):
                file_name = "Coords_ROI%s_Time%s_Channel%s.csv" % (rid, t, c)
                with open(file_name, 'w') as f:
                    locs_df = coord_generator.next()
                    locs_df.to_csv(f, sep=',', float_format='%8.2f',
                                   index=False, encoding='utf-8')
                new_file_ann, faMessage = script_util.createLinkFileAnnotation(
                    conn, file_name, image,
                    output="wrote coords file for ROI %s" % rid,
                    mimetype="text/csv", desc=None)
                message += faMessage
    return message
def attach_results(conn, ann, image, data, sizeC, sizeR):
    """Write the Ripley L dataframe to csv and attach it to the image.

    @param conn: BlitzGateway connection
    @param ann: source file annotation; its filename seeds the csv name
    @param image: image the csv is linked to
    @param data: dataframe of Ripley L results (written via to_csv)
    @param sizeC: number of channels analysed (header comment only)
    @param sizeR: number of ROIs analysed (header comment only)
    @return: the newly created file annotation
    """
    file_name = "ripleyl_plot_" + ann.getFile().getName()[:-4] + '.csv'
    # open() rather than the deprecated Python 2 file() builtin
    with open(file_name, 'w') as outfile:
        outfile.write('# ripley data for %s channels and %s ROIs: \n'
                      % (sizeC, sizeR))
        data.to_csv(outfile, sep=',', float_format='%8.2f', index=False,
                    encoding='utf-8')
    description = "Ripley L function data created from:\n" \
        " Image ID: %d Annotation_ID: %d" % (image.getId(), ann.getId())
    new_file_ann, faMessage = script_util.createLinkFileAnnotation(
        conn, file_name, image, output="Ripley L Plot csv (Excel) file",
        mimetype="text/csv", desc=description)
    return new_file_ann
def processImages(conn, scriptParams): lineWidth = scriptParams['Line_Width'] fileAnns = [] message = "" # Get the images images, logMessage = scriptUtil.getObjects(conn, scriptParams) message += logMessage if not images: return None, message # Check for line and polyline ROIs and filter images list images = [image for image in images if image.getROICount(["Polyline", "Line"]) > 0] if not images: message += "No ROI containing line or polyline was found." return None, message for image in images: cNames = [] colors = [] for ch in image.getChannels(): cNames.append(ch.getLabel()) colors.append(ch.getColor().getRGB()) sizeC = image.getSizeC() if 'Channels' in scriptParams: scriptParams['Channels'] = [i-1 for i in scriptParams['Channels']] # Convert user input from 1-based to 0-based for i in scriptParams['Channels']: print i, type(i) else: scriptParams['Channels'] = range(sizeC) # channelMinMax = [] # for c in image.getChannels(): # minC = c.getWindowMin() # maxC = c.getWindowMax() # channelMinMax.append((minC, maxC)) roiService = conn.getRoiService() result = roiService.findByImage(image.getId(), None) lines = [] polylines = [] for roi in result.rois: roiId = roi.getId().getValue() for s in roi.copyShapes(): theZ = s.getTheZ() and s.getTheZ().getValue() or 0 theT = s.getTheT() and s.getTheT().getValue() or 0 # TODO: Add some filter of shapes. E.g. text? / 'lines' only # etc. if type(s) == omero.model.LineI: x1 = s.getX1().getValue() x2 = s.getX2().getValue() y1 = s.getY1().getValue() y2 = s.getY2().getValue() lines.append({'id': roiId, 'theT': theT, 'theZ': theZ, 'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}) elif type(s) == omero.model.PolylineI: points = pointsStringToXYlist(s.getPoints().getValue()) polylines.append({'id': roiId, 'theT': theT, 'theZ': theZ, 'points': points}) if len(lines) == 0 and len(polylines) == 0: print "Image: %s had no lines or polylines" % image.getId() continue # prepare column headers, including line-id if we are going to output # raw data. 
lineId = scriptParams['Sum_or_Average'] == 'Average, with raw data' \ and 'Line, ' or "" colHeader = 'Image_ID, ROI_ID, Z, T, C, %sLine data %s of Line" \ " Width %s\n' % (lineId, scriptParams['Sum_or_Average'], scriptParams['Line_Width']) print 'colHeader', colHeader # prepare a csv file to write our data to... fileName = "Plot_Profile_%s.csv" % image.getId() try: f = open(fileName, 'w') f.write(colHeader) if len(lines) > 0: processLines(conn, scriptParams, image, lines, lineWidth, f) if len(polylines) > 0: processPolyLines( conn, scriptParams, image, polylines, lineWidth, f) finally: f.close() fileAnn, faMessage = scriptUtil.createLinkFileAnnotation( conn, fileName, image, output="Line Plot csv (Excel) file", mimetype="text/csv", desc=None) if fileAnn: fileAnns.append(fileAnn) if not fileAnns: faMessage = "No Analysis files created. See 'Info' or 'Error' for"\ " more details" elif len(fileAnns) > 1: faMessage = "Created %s csv (Excel) files" % len(fileAnns) message += faMessage return fileAnns, message
def makeThumbnailFigure(conn, scriptParams):
    """
    Makes the figure using the parameters in @scriptParams, attaches the
    figure to the parent Project/Dataset, and returns the file-annotation ID

    @param conn: BlitzGateway connection
    @param scriptParams: parameter map (Data_Type, IDs, Thumbnail_Size, ...)
    @return: (file annotation for the figure, status message)
    """
    log("Thumbnail figure created by OMERO")
    log("")
    message = ""
    # Get the objects (images or datasets)
    objects, logMessage = scriptUtil.getObjects(conn, scriptParams)
    message += logMessage
    if not objects:
        return None, message
    # Get parent: an explicit Parent_ID wins when several objects are
    # selected; otherwise attach to the first selected object
    parent = None
    if "Parent_ID" in scriptParams and len(scriptParams["IDs"]) > 1:
        if scriptParams["Data_Type"] == "Image":
            parent = conn.getObject("Dataset", scriptParams["Parent_ID"])
        else:
            parent = conn.getObject("Project", scriptParams["Parent_ID"])
    if parent is None:
        parent = objects[0]  # Attach figure to the first object
    parentClass = parent.OMERO_CLASS
    log("Figure will be linked to %s%s: %s"
        % (parentClass[0].lower(), parentClass[1:], parent.getName()))
    # empty tag list is normalised to None (meaning "no tag filtering")
    tagIds = []
    if "Tag_IDs" in scriptParams:
        tagIds = scriptParams['Tag_IDs']
    if len(tagIds) == 0:
        tagIds = None
    showUntagged = False
    if (tagIds):
        showUntagged = scriptParams["Show_Untagged_Images"]
    thumbSize = scriptParams["Thumbnail_Size"]
    maxColumns = scriptParams["Max_Columns"]
    # accumulate one canvas per dataset, then stack them vertically
    figHeight = 0
    figWidth = 0
    dsCanvases = []
    if scriptParams["Data_Type"] == "Dataset":
        for dataset in objects:
            log("Dataset: %s ID: %d" % (dataset.getName(), dataset.getId()))
            images = list(dataset.listChildren())
            dsCanvas = paintDatasetCanvas(
                conn, images, dataset.getName(), tagIds, showUntagged,
                length=thumbSize, colCount=maxColumns)
            if dsCanvas is None:
                continue
            dsCanvases.append(dsCanvas)
            figHeight += dsCanvas.size[1]
            figWidth = max(figWidth, dsCanvas.size[0])
    else:
        # images selected directly: a single canvas with no title
        imageCanvas = paintDatasetCanvas(
            conn, objects, "", tagIds, showUntagged, length=thumbSize,
            colCount=maxColumns)
        dsCanvases.append(imageCanvas)
        figHeight += imageCanvas.size[1]
        figWidth = max(figWidth, imageCanvas.size[0])
    if len(dsCanvases) == 0:
        message += "No figure created"
        return None, message
    # paste the dataset canvases top-to-bottom onto one white figure
    figure = Image.new("RGB", (figWidth, figHeight), WHITE)
    y = 0
    for ds in dsCanvases:
        imgUtil.pasteImage(ds, figure, 0, y)
        y += ds.size[1]
    log("")
    # the accumulated log lines become the annotation description
    figLegend = "\n".join(logLines)
    format = scriptParams["Format"]
    figureName = scriptParams["Figure_Name"]
    figureName = os.path.basename(figureName)
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figureName = figureName + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figureName = figureName + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figureName = figureName + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"
    namespace = NSCREATED + "/omero/figure_scripts/Thumbnail_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, parent, output="Thumbnail figure", mimetype=mimetype,
        ns=namespace, desc=figLegend, origFilePathAndName=figureName)
    message += faMessage
    return fileAnnotation, message
def run_processing(conn,script_params): file_anns = [] message = "" imageIds = [] image_id = script_params['ImageID'] imageIds.append(image_id) image = conn.getObject("Image",image_id) if not image: message = 'Could not find specified image' return message file_id = script_params['AnnotationID'] ann = conn.getObject("Annotation",file_id) if not ann: message = 'Could not find specified annotation' return message radius = script_params['Radius'] #other parameters if script_params['Convert_coordinates_to_nm']: cam_pix_size = script_params['Parent_Image_Pixel_Size'] else: cam_pix_size = 1 file_type = FILE_TYPES[script_params['File_Type']] path_to_ann = ann.getFile().getPath() + '/' + ann.getFile().getName() name,ext = os.path.splitext(path_to_ann) if ('txt' in ext) or ('csv' in ext): #get the path to the downloaded data path_to_data = download_data(ann) #get all the xy coords in that data locs = parse_sr_data(path_to_data,file_type,cam_pix_size) sizeC = len(locs) #get the rois to be processed rectangles = get_rectangles(conn,image_id) print 'rectanges:',rectangles #calculate local density locs_density = process_data(conn,image,file_type,sizeC,rectangles,locs,radius) #write the data to a csv file_name = "localisation_density_" + ann.getFile().getName()[:-4] + '.csv' with file(file_name, 'a') as outfile: outfile.write('# localisation density data for %s channels and %s ROIs: \n' % (sizeC, len(locs_density))) for r in range(len(locs_density)): outfile.write('# ROI %s\n' % rectangles[r][-1]) outfile.write('Channel,%s,%s,Density within %s [nm]\n' % (file_type['x_col'],file_type['y_col'],str(radius))) density = locs_density[r][1:,:] np.savetxt(outfile, density, fmt='%-7.2f', delimiter=',', newline='\n') new_file_ann, faMessage = script_util.createLinkFileAnnotation( conn, file_name, image, output="Wrote localisation density csv (Excel) file", mimetype="text/csv", desc=None) if new_file_ann: file_anns.append(new_file_ann) if not file_anns: faMessage = "No Analysis files 
created. See 'Info' or 'Error' for"\ " more details" elif len(file_anns) > 1: faMessage = "Created %s csv (Excel) files" % len(file_anns) else: message = 'file annotation must be txt or csv' return message # clean up delete_downloaded_data(ann) message += faMessage return message
def processImages(conn, scriptParams): lineWidth = scriptParams['Line_Width'] fileAnns = [] message = "" # Get the images images, logMessage = scriptUtil.getObjects(conn, scriptParams) message += logMessage if not images: return None, message # Check for line and polyline ROIs and filter images list images = [ image for image in images if image.getROICount(["Polyline", "Line"]) > 0 ] if not images: message += "No ROI containing line or polyline was found." return None, message for image in images: cNames = [] colors = [] for ch in image.getChannels(): cNames.append(ch.getLabel()) colors.append(ch.getColor().getRGB()) sizeC = image.getSizeC() if 'Channels' in scriptParams: scriptParams['Channels'] = [ i - 1 for i in scriptParams['Channels'] ] # Convert user input from 1-based to 0-based for i in scriptParams['Channels']: print i, type(i) else: scriptParams['Channels'] = range(sizeC) # channelMinMax = [] # for c in image.getChannels(): # minC = c.getWindowMin() # maxC = c.getWindowMax() # channelMinMax.append((minC, maxC)) roiService = conn.getRoiService() result = roiService.findByImage(image.getId(), None) lines = [] polylines = [] for roi in result.rois: roiId = roi.getId().getValue() for s in roi.copyShapes(): theZ = s.getTheZ() and s.getTheZ().getValue() or 0 theT = s.getTheT() and s.getTheT().getValue() or 0 # TODO: Add some filter of shapes. E.g. text? / 'lines' only # etc. 
if type(s) == omero.model.LineI: x1 = s.getX1().getValue() x2 = s.getX2().getValue() y1 = s.getY1().getValue() y2 = s.getY2().getValue() lines.append({ 'id': roiId, 'theT': theT, 'theZ': theZ, 'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2 }) elif type(s) == omero.model.PolylineI: points = pointsStringToXYlist(s.getPoints().getValue()) polylines.append({ 'id': roiId, 'theT': theT, 'theZ': theZ, 'points': points }) if len(lines) == 0 and len(polylines) == 0: print "Image: %s had no lines or polylines" % image.getId() continue # prepare column headers, including line-id if we are going to output # raw data. lineId = scriptParams['Sum_or_Average'] == 'Average, with raw data' \ and 'Line, ' or "" colHeader = 'Image_ID, ROI_ID, Z, T, C, %sLine data %s of Line" \ " Width %s\n' % (lineId, scriptParams['Sum_or_Average'], scriptParams['Line_Width']) print 'colHeader', colHeader # prepare a csv file to write our data to... fileName = "Plot_Profile_%s.csv" % image.getId() try: f = open(fileName, 'w') f.write(colHeader) if len(lines) > 0: processLines(conn, scriptParams, image, lines, lineWidth, f) if len(polylines) > 0: processPolyLines(conn, scriptParams, image, polylines, lineWidth, f) finally: f.close() fileAnn, faMessage = scriptUtil.createLinkFileAnnotation( conn, fileName, image, output="Line Plot csv (Excel) file", mimetype="text/csv", desc=None) if fileAnn: fileAnns.append(fileAnn) if not fileAnns: faMessage = "No Analysis files created. See 'Info' or 'Error' for"\ " more details" elif len(fileAnns) > 1: faMessage = "Created %s csv (Excel) files" % len(fileAnns) message += faMessage return fileAnns, message
def splitViewFigure(conn, scriptParams):
    """
    Processes the arguments, populating defaults if necessary. Prints the
    details to log (fig-legend). Even handles missing arguments that are not
    optional (from when this ran from commandline with everything optional)
    then calls makeSplitViewFigure() to make the figure, attaches it to the
    Image as an 'originalFile' annotation, with fig-legend as the
    description.

    @param conn: BlitzGateway connection
    @param scriptParams: parameter map driving the figure layout
    @return: (file annotation for the figure, status message)
    """
    log("Split-View figure created by OMERO on %s" % date.today())
    log("")
    message = ""  # message to be returned to the client
    imageIds = []
    pixelIds = []
    imageLabels = []

    # function for getting image labels.
    def getLabels(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if scriptParams["Image_Labels"] == "Datasets":
        def getDatasets(name, tagsList, pdList):
            return [dataset for project, dataset in pdList]
        getLabels = getDatasets
    elif scriptParams["Image_Labels"] == "Tags":
        def getTags(name, tagsList, pdList):
            return tagsList
        getLabels = getTags

    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, scriptParams)
    message += logMessage
    if not images:
        return None, message

    # Attach figure to the first image
    omeroImage = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        imageIds.append(image.getId())
        pixelIds.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pdMap = figUtil.getDatasetsProjectsFromImages(
        conn.getQueryService(), imageIds)
    tagMap = figUtil.getTagsFromImages(conn.getMetadataService(), imageIds)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        imageDate = image.getAcquisitionDate()
        iId = image.getId()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]
        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s ID: %d" % (name, iId))
        # acquisition date is in milliseconds since the epoch
        log(" Date: %s" % date.fromtimestamp(imageDate / 1000))
        log(" Tags: %s" % tags)
        log(" Project/Datasets: %s" % pdString)
        imageLabels.append(getLabels(name, tagsList, pdList))

    # use the first image to define dimensions, channel colours etc.
    sizeX = omeroImage.getSizeX()
    sizeY = omeroImage.getSizeY()
    sizeZ = omeroImage.getSizeZ()
    sizeC = omeroImage.getSizeC()

    # set image dimensions; -1 means "not specified"
    zStart = -1
    zEnd = -1
    if "Z_Start" in scriptParams:
        zStart = scriptParams["Z_Start"]
    if "Z_End" in scriptParams:
        zEnd = scriptParams["Z_End"]

    width = "Width" in scriptParams and scriptParams["Width"] or sizeX
    height = "Height" in scriptParams and scriptParams["Height"] or sizeY

    log("Image dimensions for all panels (pixels): width: %d height: %d"
        % (width, height))

    # Make split-indexes list. If argument wasn't specified, include them all.
    splitIndexes = []
    if "Split_Indexes" in scriptParams:
        splitIndexes = scriptParams["Split_Indexes"]
    else:
        splitIndexes = range(sizeC)

    # Make channel-names map. If argument wasn't specified, name by index
    channelNames = {}
    for c in range(sizeC):
        channelNames[c] = str(c)
    if "Channel_Names" in scriptParams:
        cNameMap = scriptParams["Channel_Names"]
        for c in cNameMap:
            index = int(c)
            channelNames[index] = cNameMap[c]

    mergedIndexes = []  # the channels in the combined image,
    mergedColours = {}
    if "Merged_Colours" in scriptParams:
        cColourMap = scriptParams["Merged_Colours"]
        for c in cColourMap:
            rgb = cColourMap[c]
            rgba = imgUtil.RGBIntToRGBA(rgb)
            mergedColours[int(c)] = rgba
            mergedIndexes.append(int(c))
        mergedIndexes.sort()
    else:
        mergedIndexes = range(sizeC)

    colourChannels = not scriptParams["Split_Panels_Grey"]

    # projection algorithm defaults to maximum intensity
    algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    if "Mean Intensity" == scriptParams["Algorithm"]:
        algorithm = omero.constants.projection.ProjectionType.MEANINTENSITY

    stepping = min(scriptParams["Stepping"], sizeZ)

    scalebar = None
    if "Scalebar" in scriptParams:
        scalebar = scriptParams["Scalebar"]
        log("Scalebar is %d microns" % scalebar)

    # alpha channel of the overlay colour is discarded
    r, g, b, a = OVERLAY_COLOURS[scriptParams["Overlay_Colour"]]
    overlayColour = (r, g, b)

    mergedNames = scriptParams["Merged_Names"]

    print "splitIndexes", splitIndexes
    print "channelNames", channelNames
    print "colourChannels", colourChannels
    print "mergedIndexes", mergedIndexes
    print "mergedColours", mergedColours
    print "mergedNames", mergedNames

    fig = makeSplitViewFigure(
        conn, pixelIds, zStart, zEnd, splitIndexes, channelNames,
        colourChannels, mergedIndexes, mergedColours, mergedNames, width,
        height, imageLabels, algorithm, stepping, scalebar, overlayColour)

    # the accumulated log lines become the annotation description
    figLegend = "\n".join(logStrings)

    format = JPEG
    if scriptParams["Format"] == "PNG":
        format = PNG

    output = scriptParams["Figure_Name"]
    if format == PNG:
        output = output + ".png"
        fig.save(output, "PNG")
        mimetype = "image/png"
    else:
        output = output + ".jpg"
        fig.save(output)
        mimetype = "image/jpeg"

    # Upload the figure 'output' to the server, creating a file annotation
    # and attaching it to the omeroImage, adding the figLegend as the
    # fileAnnotation description.
    namespace = omero.constants.namespaces.NSCREATED + \
        "/omero/figure_scripts/Split_View_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, omeroImage, output="Split view figure",
        mimetype=mimetype, ns=namespace, desc=figLegend)
    message += faMessage
    return fileAnnotation, message
def analyseImage(conn, image, cIndex): print "\n---------------------" print "Analysing Image: ", image.getName() # Get dictionary of tIndex:ellipse ellipses = getEllipses(conn, image.getId()) # Get dictionary of tIndex:averageIntensity intensityData = getEllipseData(image, ellipses, cIndex) # Get dictionary of tIndex:timeStamp (secs) timeValues = getTimes(conn, image) # We now have all the Data we need from OMERO # create lists of times (secs) and intensities... timeList = [] valueList = [] # ...Ordered by tIndex for t in range(image.getSizeT()): if t in intensityData: timeList.append(timeValues[t]) valueList.append(intensityData[t]) print "Analysing pixel values for %s time points" % len(timeList) # Find the bleach intensity & time bleachValue = min(valueList) bleachTindex = valueList.index(bleachValue) bleachTime = timeList[bleachTindex] preBleachValue = valueList[bleachTindex-1] print "Bleach at tIndex: %s, TimeStamp: %0.2f seconds" \ % (bleachTindex, bleachTime) print "Before Bleach: %0.2f, After Bleach: %0.2f" \ % (preBleachValue, bleachValue) # Use last timepoint for max recovery recoveryValue = valueList[-1] endTimepoint = timeList[-1] mobileFraction = (recoveryValue - bleachValue) / \ (preBleachValue - bleachValue) print "Recovered to: %0.2f, after %0.2f seconds" \ % (recoveryValue, endTimepoint) print "Mobile Fraction: %0.2f" % mobileFraction halfRecovery = (recoveryValue + bleachValue)/2 # quick & dirty - pick the first timepoint where we exceed half recovery recoveryValues = valueList[bleachTindex:] # just the values & times after bleach time recoveryTimes = timeList[bleachTindex:] for t, v in zip(recoveryTimes, recoveryValues): if v >= halfRecovery: tHalf = t - bleachTime break print "tHalf: %0.2f seconds" % tHalf csvLines = [ "Time (secs)," + ",".join([str(t) for t in timeList]), "\n", "Average pixel value," + ",".join([str(v) for v in valueList]), "\n", "tHalf (secs), %0.2f seconds" % tHalf, "mobileFraction, %0.2f" % mobileFraction ] f = 
open("FRAP.csv", "w") f.writelines(csvLines) f.close() namespace = "/omero-user-scripts/example/Simple_FRAP/" scriptUtil.createLinkFileAnnotation(conn, "FRAP.csv", image, ns=namespace) return tHalf
def batchImageExport(conn, scriptParams): # for params with default values, we can get the value directly splitCs = scriptParams["Export_Individual_Channels"] mergedCs = scriptParams["Export_Merged_Image"] greyscale = scriptParams["Individual_Channels_Grey"] dataType = scriptParams["Data_Type"] folder_name = scriptParams["Folder_Name"] folder_name = os.path.basename(folder_name) format = scriptParams["Format"] projectZ = "Choose_Z_Section" in scriptParams and \ scriptParams["Choose_Z_Section"] == 'Max projection' if (not splitCs) and (not mergedCs): log("Not chosen to save Individual Channels OR Merged Image") return # check if we have these params channelNames = [] if "Channel_Names" in scriptParams: channelNames = scriptParams["Channel_Names"] zoomPercent = None if "Zoom" in scriptParams and scriptParams["Zoom"] != "100%": zoomPercent = int(scriptParams["Zoom"][:-1]) # functions used below for each imaage. def getZrange(sizeZ, scriptParams): zRange = None if "Choose_Z_Section" in scriptParams: zChoice = scriptParams["Choose_Z_Section"] # NB: all Z indices in this script are 1-based if zChoice == 'ALL Z planes': zRange = (1, sizeZ+1) elif "OR_specify_Z_index" in scriptParams: zIndex = scriptParams["OR_specify_Z_index"] zIndex = min(zIndex, sizeZ) zRange = (zIndex,) elif "OR_specify_Z_start_AND..." 
in scriptParams and \ "...specify_Z_end" in scriptParams: start = scriptParams["OR_specify_Z_start_AND..."] start = min(start, sizeZ) end = scriptParams["...specify_Z_end"] end = min(end, sizeZ) # in case user got zStart and zEnd mixed up zStart = min(start, end) zEnd = max(start, end) if zStart == zEnd: zRange = (zStart,) else: zRange = (zStart, zEnd+1) return zRange def getTrange(sizeT, scriptParams): tRange = None if "Choose_T_Section" in scriptParams: tChoice = scriptParams["Choose_T_Section"] # NB: all T indices in this script are 1-based if tChoice == 'ALL T planes': tRange = (1, sizeT+1) elif "OR_specify_T_index" in scriptParams: tIndex = scriptParams["OR_specify_T_index"] tIndex = min(tIndex, sizeT) tRange = (tIndex,) elif "OR_specify_T_start_AND..." in scriptParams and \ "...specify_T_end" in scriptParams: start = scriptParams["OR_specify_T_start_AND..."] start = min(start, sizeT) end = scriptParams["...specify_T_end"] end = min(end, sizeT) # in case user got zStart and zEnd mixed up tStart = min(start, end) tEnd = max(start, end) if tStart == tEnd: tRange = (tStart,) else: tRange = (tStart, tEnd+1) return tRange # Get the images or datasets message = "" objects, logMessage = script_utils.getObjects(conn, scriptParams) message += logMessage if not objects: return None, message # Attach figure to the first image parent = objects[0] if dataType == 'Dataset': images = [] for ds in objects: images.extend(list(ds.listChildren())) if not images: message += "No image found in dataset(s)" return None, message else: images = objects log("Processing %s images" % len(images)) # somewhere to put images curr_dir = os.getcwd() exp_dir = os.path.join(curr_dir, folder_name) try: os.mkdir(exp_dir) except: pass # max size (default 12kx12k) size = conn.getDownloadAsMaxSizeSetting() size = int(size) ids = [] # do the saving to disk for img in images: pixels = img.getPrimaryPixels() if (pixels.getId() in ids): continue ids.append(pixels.getId()) sizeX = pixels.getSizeX() sizeY 
= pixels.getSizeY() if sizeX*sizeY > size: log(" ** Can't export a 'Big' image to %s. **" % format) if len(images) == 1: return None, "Can't export a 'Big' image to %s." % format continue else: log("Exporting image as %s: %s" % (format, img.getName())) if format == 'OME-TIFF': saveAsOmeTiff(conn, img, folder_name) else: if img._prepareRE().requiresPixelsPyramid(): log(" ** Can't export a 'Big' image to OME-TIFF. **") log("\n----------- Saving planes from image: '%s' ------------" % img.getName()) sizeC = img.getSizeC() sizeZ = img.getSizeZ() sizeT = img.getSizeT() zRange = getZrange(sizeZ, scriptParams) tRange = getTrange(sizeT, scriptParams) log("Using:") if zRange is None: log(" Z-index: Last-viewed") elif len(zRange) == 1: log(" Z-index: %d" % zRange[0]) else: log(" Z-range: %s-%s" % (zRange[0], zRange[1]-1)) if projectZ: log(" Z-projection: ON") if tRange is None: log(" T-index: Last-viewed") elif len(tRange) == 1: log(" T-index: %d" % tRange[0]) else: log(" T-range: %s-%s" % (tRange[0], tRange[1]-1)) log(" Format: %s" % format) if zoomPercent is None: log(" Image Zoom: 100%") else: log(" Image Zoom: %s" % zoomPercent) log(" Greyscale: %s" % greyscale) log("Channel Rendering Settings:") for ch in img.getChannels(): log(" %s: %d-%d" % (ch.getLabel(), ch.getWindowStart(), ch.getWindowEnd())) try: savePlanesForImage( conn, img, sizeC, splitCs, mergedCs, channelNames, zRange, tRange, greyscale, zoomPercent, projectZ=projectZ, format=format, folder_name=folder_name) finally: # Make sure we close Rendering Engine img._re.close() # write log for exported images (not needed for ome-tiff) logFile = open(os.path.join(exp_dir, 'Batch_Image_Export.txt'), 'w') try: for s in logStrings: logFile.write(s) logFile.write("\n") finally: logFile.close() if len(os.listdir(exp_dir)) == 0: return None, "No files exported. 
See 'info' for more details" # zip everything up (unless we've only got a single ome-tiff) if format == 'OME-TIFF' and len(os.listdir(exp_dir)) == 1: ometiffIds = [t.id for t in parent.listAnnotations(ns=NSOMETIFF)] print "Deleting OLD ome-tiffs: %s" % ometiffIds conn.deleteObjects("Annotation", ometiffIds) export_file = os.path.join(folder_name, os.listdir(exp_dir)[0]) namespace = NSOMETIFF outputDisplayName = "OME-TIFF" mimetype = 'image/tiff' else: export_file = "%s.zip" % folder_name compress(export_file, folder_name) mimetype = 'application/zip' outputDisplayName = "Batch export zip" namespace = NSCREATED + "/omero/export_scripts/Batch_Image_Export" fileAnnotation, annMessage = script_utils.createLinkFileAnnotation( conn, export_file, parent, output=outputDisplayName, ns=namespace, mimetype=mimetype) message += annMessage return fileAnnotation, message
def movieFigure(conn, commandArgs):
    """
    Makes the figure using the parameters in @commandArgs, attaches the
    figure to the parent Project/Dataset, and returns the file-annotation ID

    @param conn        The Blitz gateway connection
    @param commandArgs Map of parameters for the script
    @return            (fileAnnotation, message) — the created file
                       annotation (ID object, not value) and a status string
    """
    log("Movie figure created by OMERO on %s" % date.today())
    log("")

    # map the UI time-unit choice onto a human-readable label
    timeLabels = {"SECS_MILLIS": "seconds",
                  "SECS": "seconds",
                  "MINS": "minutes",
                  "HOURS": "hours",
                  "MINS_SECS": "mins:secs",
                  "HOURS_MINS": "hours:mins"}
    timeUnits = "SECS"
    if "Time_Units" in commandArgs:
        timeUnits = commandArgs["Time_Units"]
        # convert from UI name to timeLabels key
        timeUnits = timeUnits.replace(" ", "_")
    if timeUnits not in timeLabels.keys():
        timeUnits = "SECS"
    log("Time units are in %s" % timeLabels[timeUnits])

    pixelIds = []
    imageIds = []
    imageLabels = []
    message = ""  # message to be returned to the client

    # function for getting image labels.
    def getImageNames(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in commandArgs:
        if commandArgs["Image_Labels"] == "Datasets":
            def getDatasets(name, tagsList, pdList):
                return [dataset for project, dataset in pdList]
            getLabels = getDatasets
        elif commandArgs["Image_Labels"] == "Tags":
            def getTags(name, tagsList, pdList):
                return tagsList
            getLabels = getTags
        else:
            getLabels = getImageNames
    else:
        getLabels = getImageNames

    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, commandArgs)
    message += logMessage
    if not images:
        return None, message

    # Attach figure to the first image
    omeroImage = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        imageIds.append(image.getId())
        pixelIds.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pdMap = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(),
                                                  imageIds)
    tagMap = figUtil.getTagsFromImages(conn.getMetadataService(), imageIds)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        iId = image.getId()
        imageDate = image.getAcquisitionDate()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]

        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s ID: %d" % (name, iId))
        # imageDate is millis since epoch, hence the /1000 for fromtimestamp
        log(" Date: %s" % date.fromtimestamp(imageDate / 1000))
        log(" Tags: %s" % tags)
        log(" Project/Datasets: %s" % pdString)

        imageLabels.append(getLabels(name, tagsList, pdList))

    # use the first image to define dimensions, channel colours etc.
    sizeX = omeroImage.getSizeX()
    sizeY = omeroImage.getSizeY()
    sizeZ = omeroImage.getSizeZ()
    sizeT = omeroImage.getSizeT()

    tIndexes = []
    if "T_Indexes" in commandArgs:
        for t in commandArgs["T_Indexes"]:
            tIndexes.append(t)
    print "T_Indexes", tIndexes
    if len(tIndexes) == 0:  # if no t-indexes given, use all t-indices
        tIndexes = range(sizeT)

    # -1 means "not specified" for the Z projection range
    zStart = -1
    zEnd = -1
    if "Z_Start" in commandArgs:
        zStart = commandArgs["Z_Start"]
    if "Z_End" in commandArgs:
        zEnd = commandArgs["Z_End"]

    width = sizeX
    if "Width" in commandArgs:
        width = commandArgs["Width"]

    height = sizeY
    if "Height" in commandArgs:
        height = commandArgs["Height"]

    spacer = (width / 25) + 2

    algorithm = ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in commandArgs:
        a = commandArgs["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = ProjectionType.MEANINTENSITY

    stepping = 1
    if "Stepping" in commandArgs:
        s = commandArgs["Stepping"]
        if (0 < s < sizeZ):
            stepping = s

    scalebar = None
    if "Scalebar_Size" in commandArgs:
        sb = commandArgs["Scalebar_Size"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None

    overlayColour = (255, 255, 255)
    if "Scalebar_Colour" in commandArgs:
        # OVERLAY_COLOURS maps the UI name to an (r, g, b, a) tuple
        r, g, b, a = OVERLAY_COLOURS[commandArgs["Scalebar_Colour"]]
        overlayColour = (r, g, b)

    maxColCount = 10
    if "Max_Columns" in commandArgs:
        maxColCount = commandArgs["Max_Columns"]

    figure = createMovieFigure(conn, pixelIds, tIndexes, zStart, zEnd,
                               width, height, spacer, algorithm, stepping,
                               scalebar, overlayColour, timeUnits,
                               imageLabels, maxColCount)
    log("")
    # logLines is presumably a module-level log accumulator — the joined
    # text becomes the figure legend (file-annotation description)
    figLegend = "\n".join(logLines)

    # print figLegend    # bug fixing only
    format = commandArgs["Format"]

    figureName = "movieFigure"
    if "Figure_Name" in commandArgs:
        figureName = str(commandArgs["Figure_Name"])
        figureName = os.path.basename(figureName)
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figureName = figureName + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figureName = figureName + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figureName = figureName + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"

    namespace = NSCREATED + "/omero/figure_scripts/Movie_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, omeroImage, output="Movie figure", mimetype=mimetype,
        ns=namespace, desc=figLegend, origFilePathAndName=figureName)
    message += faMessage

    return fileAnnotation, message
def analyseImage(conn, image, cIndex): print "\n---------------------" print "Analysing Image: ", image.getName() # Get dictionary of tIndex:ellipse ellipses = getEllipses(conn, image.getId()) # Get dictionary of tIndex:averageIntensity intensityData = getEllipseData(image, ellipses, cIndex) # Get dictionary of tIndex:timeStamp (secs) timeValues = getTimes(conn, image) # We now have all the Data we need from OMERO # create lists of times (secs) and intensities... timeList = [] valueList = [] # ...Ordered by tIndex for t in range(image.getSizeT()): if t in intensityData: timeList.append(timeValues[t]) valueList.append(intensityData[t]) print "Analysing pixel values for %s time points" % len(timeList) # Find the bleach intensity & time bleachValue = min(valueList) bleachTindex = valueList.index(bleachValue) bleachTime = timeList[bleachTindex] preBleachValue = valueList[bleachTindex - 1] print "Bleach at tIndex: %s, TimeStamp: %0.2f seconds" \ % (bleachTindex, bleachTime) print "Before Bleach: %0.2f, After Bleach: %0.2f" \ % (preBleachValue, bleachValue) # Use last timepoint for max recovery recoveryValue = valueList[-1] endTimepoint = timeList[-1] mobileFraction = (recoveryValue - bleachValue) / \ (preBleachValue - bleachValue) print "Recovered to: %0.2f, after %0.2f seconds" \ % (recoveryValue, endTimepoint) print "Mobile Fraction: %0.2f" % mobileFraction halfRecovery = (recoveryValue + bleachValue) / 2 # quick & dirty - pick the first timepoint where we exceed half recovery recoveryValues = valueList[bleachTindex:] # just the values & times after bleach time recoveryTimes = timeList[bleachTindex:] for t, v in zip(recoveryTimes, recoveryValues): if v >= halfRecovery: tHalf = t - bleachTime break print "tHalf: %0.2f seconds" % tHalf csvLines = [ "Time (secs)," + ",".join([str(t) for t in timeList]), "\n", "Average pixel value," + ",".join([str(v) for v in valueList]), "\n", "tHalf (secs), %0.2f seconds" % tHalf, "mobileFraction, %0.2f" % mobileFraction ] f = 
open("FRAP.csv", "w") f.writelines(csvLines) f.close() namespace = "/omero-user-scripts/example/Simple_FRAP/" scriptUtil.createLinkFileAnnotation(conn, "FRAP.csv", image, ns=namespace) return tHalf
def writeMovie(commandArgs, conn):
    """
    Makes the movie.

    Renders each requested (t, z) plane of the first selected image,
    decorates it (scalebar, timestamps, plane info, watermark, intro/outro
    slides), writes the frames to disk and encodes them with mencoder
    (buildAVI). The result is uploaded and, optionally, linked to the image.

    @param commandArgs Map of script parameters
    @param conn        The Blitz gateway connection
    @return            (file annotation or original file, message)
    """
    log("Movie created by OMERO")
    log("")
    message = ""

    # query across all groups so the image can be found wherever it lives
    conn.SERVICE_OPTS.setOmeroGroup('-1')

    session = conn.c.sf
    gateway = conn
    scriptService = session.getScriptService()
    queryService = session.getQueryService()
    updateService = session.getUpdateService()
    rawFileStore = session.createRawFileStore()

    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, commandArgs)
    message += logMessage
    if not images:
        return None, message
    # Get the first valid image (should be expanded to process the list)
    omeroImage = images[0]

    if commandArgs["RenderingDef_ID"] >= 0:
        omeroImage._prepareRenderingEngine(
            rdid=commandArgs["RenderingDef_ID"])

    pixels = omeroImage.getPrimaryPixels()
    pixelsId = pixels.getId()

    sizeX = pixels.getSizeX()
    sizeY = pixels.getSizeY()
    sizeZ = pixels.getSizeZ()
    sizeC = pixels.getSizeC()
    sizeT = pixels.getSizeT()

    # bail out if the image dimensions are unavailable
    if (sizeX == None or sizeY == None or sizeZ == None or sizeT == None or
            sizeC == None):
        return

    # no physical pixel size: a scalebar would be meaningless
    if (pixels.getPhysicalSizeX() == None):
        commandArgs["Scalebar"] = 0

    cRange = range(0, sizeC)
    if "Channels" in commandArgs and validChannels(commandArgs["Channels"],
                                                   sizeC):
        cRange = commandArgs["Channels"]

    tzList = calculateRanges(sizeZ, sizeT, commandArgs)

    timeMap = calculateAquisitionTime(conn, pixelsId, cRange, tzList)
    # disable timestamps when no acquisition times are available
    if (timeMap == None):
        commandArgs["Show_Time"] = False
    if (timeMap != None):
        if (len(timeMap) == 0):
            commandArgs["Show_Time"] = False

    pixelTypeString = pixels.getPixelsType().getValue()
    frameNo = 1
    # setActiveChannels takes 1-based channel indices
    omeroImage.setActiveChannels(map(lambda x: x + 1, cRange))
    renderingEngine = omeroImage._re

    overlayColour = (255, 255, 255)
    if "Overlay_Colour" in commandArgs:
        r, g, b, a = COLOURS[commandArgs["Overlay_Colour"]]
        overlayColour = (r, g, b)

    canvasColour = tuple(COLOURS[commandArgs["Canvas_Colour"]][:3])
    # enforce the minimum frame size; pad on a canvas if needed
    mw = commandArgs["Min_Width"]
    if mw < sizeX:
        mw = sizeX
    mh = commandArgs["Min_Height"]
    if mh < sizeY:
        mh = sizeY
    ovlpos = None
    canvas = None
    if sizeX < mw or sizeY < mh:
        ovlpos = ((mw - sizeX) / 2, (mh - sizeY) / 2)
        canvas = Image.new("RGBA", (mw, mh), canvasColour)

    format = commandArgs["Format"]
    fileNames = []

    # add intro...
    if "Intro_Slide" in commandArgs and commandArgs["Intro_Slide"].id:
        intro_duration = commandArgs["Intro_Duration"]
        intro_fileId = commandArgs["Intro_Slide"].id.val
        intro_filenames = write_intro_end_slides(
            conn, commandArgs, intro_fileId, intro_duration, mw, mh)
        fileNames.extend(intro_filenames)

    # prepare watermark
    if "Watermark" in commandArgs and commandArgs["Watermark"].id:
        watermark = prepareWatermark(conn, commandArgs, mw, mh)

    # add movie frames...
    for tz in tzList:
        t = tz[0]
        z = tz[1]
        plane = getPlane(renderingEngine, z, t)
        planeImage = numpy.array(plane, dtype='uint32')

        # byteswap + reshape to feed the packed-int plane into PIL as ARGB.
        # NOTE(review): reshape(sizeX, sizeY) looks transposed for
        # non-square images — confirm against getPlane's row order.
        planeImage = planeImage.byteswap()
        planeImage = planeImage.reshape(sizeX, sizeY)
        image = Image.frombuffer('RGBA', (sizeX, sizeY), planeImage.data,
                                 'raw', 'ARGB', 0, 1)
        if ovlpos is not None:
            # centre the rendered plane on the padding canvas
            image2 = canvas.copy()
            image2.paste(image, ovlpos, image)
            image = image2
        if "Scalebar" in commandArgs and commandArgs["Scalebar"]:
            image = addScalebar(
                commandArgs["Scalebar"], image, pixels, commandArgs)
        planeInfo = "z:" + str(z) + "t:" + str(t)
        if "Show_Time" in commandArgs and commandArgs["Show_Time"]:
            time = timeMap[planeInfo]
            image = addTimePoints(time, pixels, image, overlayColour)
        if "Show_Plane_Info" in commandArgs and \
                commandArgs["Show_Plane_Info"]:
            image = addPlaneInfo(z, t, pixels, image, overlayColour)
        if "Watermark" in commandArgs and commandArgs["Watermark"].id:
            image = pasteWatermark(image, watermark)
        # QT (quicktime) wants png frames; everything else gets jpeg
        if format == QT:
            filename = str(frameNo) + '.png'
            image.save(filename, "PNG")
        else:
            filename = str(frameNo) + '.jpg'
            image.save(filename, "JPEG")
        fileNames.append(filename)
        frameNo += 1

    # add exit frames... "outro" slides, mirroring the intro handling above
    if "Ending_Slide" in commandArgs and commandArgs["Ending_Slide"].id:
        end_duration = commandArgs["Ending_Duration"]
        end_fileId = commandArgs["Ending_Slide"].id.val
        end_filenames = write_intro_end_slides(
            conn, commandArgs, end_fileId, end_duration, mw, mh)
        fileNames.extend(end_filenames)

    filelist = ",".join(fileNames)

    ext = formatMap[format]
    movieName = "Movie"
    if "Movie_Name" in commandArgs:
        movieName = commandArgs["Movie_Name"]
    if not movieName.endswith(".%s" % ext):
        movieName = "%s.%s" % (movieName, ext)
    # spaces etc in file name cause problems
    movieName = re.sub("[$&\;|\(\)<>' ]", "", movieName)
    framesPerSec = 2
    if "FPS" in commandArgs:
        framesPerSec = commandArgs["FPS"]
    buildAVI(mw, mh, filelist, framesPerSec, movieName, format)
    figLegend = "\n".join(logLines)
    mimetype = formatMimetypes[format]

    if not os.path.exists(movieName):
        print "mencoder Failed to create movie file: %s" % movieName
        return None, "Failed to create movie file: %s" % movieName
    if not commandArgs["Do_Link"]:
        # upload only — do not link the file to the image
        originalFile = scriptUtil.createFile(
            updateService, movieName, mimetype, movieName)
        scriptUtil.uploadFile(rawFileStore, originalFile, movieName)
        return originalFile, message

    namespace = omero.constants.namespaces.NSCREATED + \
        "/omero/export_scripts/Make_Movie"
    fileAnnotation, annMessage = scriptUtil.createLinkFileAnnotation(
        conn, movieName, omeroImage, output="Movie", ns=namespace,
        mimetype=mimetype)
    message += annMessage
    return fileAnnotation._obj, message
def run_processing(conn,script_params): file_anns = [] message = "" imageIds = [] image_id = script_params['ImageID'] imageIds.append(image_id) image = conn.getObject("Image",image_id) if not image: message = 'Could not find specified image' return message file_id = script_params['AnnotationID'] ann = conn.getObject("Annotation",file_id) if not ann: message = 'Could not find specified annotation' return message #other parameters sr_pix_size = script_params['SR_pixel_size'] if script_params['Convert_coordinates_to_nm']: cam_pix_size = script_params['Parent_Image_Pixel_Size'] else: cam_pix_size = 1 file_type = script_params['File_Type'] path_to_ann = ann.getFile().getPath() + '/' + ann.getFile().getName() name,ext = os.path.splitext(path_to_ann) if ('txt' in ext) or ('csv' in ext): #get the path to the downloaded data path_to_data = download_data(ann) #get all the xy coords in that data coords = parse_sr_data(path_to_data,file_type,cam_pix_size) #get the rois to be processed rectangles,rectIds = get_rectangles(conn,image_id,sr_pix_size) #calculate near neighbour distances nn_data,nn_hist,bins = process_data(conn,image,rectangles,coords) #put the data in an omero table # put_data_in_table(conn,imageIds,rectIds,nn_data,nn_hist,bins) #write the data to a csv file_name = "near_neighbours_" + ann.getFile().getName()[:-4] + '.csv' print file_name try: f = open(file_name,'w') for r in range(len(nn_data)): row = nn_data[r] for chan in range(row.shape[1]): f.write(','.join([str(c) for c in row[:,chan]])+'\n') finally: f.close() new_file_ann, faMessage = script_util.createLinkFileAnnotation( conn, file_name, image, output="Wrote near neighbour csv (Excel) file", mimetype="text/csv", desc=None) if new_file_ann: file_anns.append(new_file_ann) if not file_anns: faMessage = "No Analysis files created. 
See 'Info' or 'Error' for"\ " more details" elif len(file_anns) > 1: faMessage = "Created %s csv (Excel) files" % len(file_anns) message += faMessage else: message = 'file annotation must be txt or csv' return message # clean up delete_downloaded_data(ann) message += faMessage return message
def roiFigure(conn, commandArgs): """ This processes the script parameters, adding defaults if needed. Then calls a method to make the figure, and finally uploads and attaches this to the primary image. @param: session The OMERO session @param: commandArgs Map of String:Object parameters for the script. Objects are not rtypes, since getValue() was called when the map was processed below. But, list and map objects may contain rtypes (need to call getValue()) @return: the id of the originalFileLink child. (ID object, not value) """ log("ROI figure created by OMERO on %s" % date.today()) log("") message = "" # message to be returned to the client pixelIds = [] imageIds = [] imageLabels = [] # function for getting image labels. def getImageNames(fullName, tagsList, pdList): name = fullName.split("/")[-1] return [name] # default function for getting labels is getName (or use datasets / tags) if "Image_Labels" in commandArgs: if commandArgs["Image_Labels"] == "Datasets": def getDatasets(name, tagsList, pdList): return [dataset for project, dataset in pdList] getLabels = getDatasets elif commandArgs["Image_Labels"] == "Tags": def getTags(name, tagsList, pdList): return tagsList getLabels = getTags else: getLabels = getImageNames else: getLabels = getImageNames # Get the images images, logMessage = scriptUtil.getObjects(conn, commandArgs) message += logMessage if not images: return None, message # Check for rectangular ROIs and filter images list images = [image for image in images if image.getROICount("Rectangle") > 0] if not images: message += "No rectangle ROI found." return None, message # Attach figure to the first image omeroImage = images[0] # process the list of images log("Image details:") for image in images: imageIds.append(image.getId()) pixelIds.append(image.getPrimaryPixels().getId()) # a map of imageId : list of (project, dataset) names. 
pdMap = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(), imageIds) tagMap = figUtil.getTagsFromImages(conn.getMetadataService(), imageIds) # Build a legend entry for each image for image in images: name = image.getName() iId = image.getId() imageDate = image.getAcquisitionDate() tagsList = tagMap[iId] pdList = pdMap[iId] tags = ", ".join(tagsList) pdString = ", ".join(["%s/%s" % pd for pd in pdList]) log(" Image: %s ID: %d" % (name, iId)) if imageDate: log(" Date: %s" % imageDate) else: log(" Date: not set") log(" Tags: %s" % tags) log(" Project/Datasets: %s" % pdString) imageLabels.append(getLabels(name, tagsList, pdList)) # use the first image to define dimensions, channel colours etc. sizeX = omeroImage.getSizeX() sizeY = omeroImage.getSizeY() sizeZ = omeroImage.getSizeZ() sizeC = omeroImage.getSizeC() width = sizeX if "Width" in commandArgs: w = commandArgs["Width"] try: width = int(w) except: log("Invalid width: %s Using default value: %d" % (str(w), sizeX)) height = sizeY if "Height" in commandArgs: h = commandArgs["Height"] try: height = int(h) except: log("Invalid height: %s Using default value" % (str(h), sizeY)) log("Image dimensions for all panels (pixels): width: %d height: %d" % (width, height)) # the channels in the combined image, if "Merged_Channels" in commandArgs: # convert to 0-based mergedIndexes = [c - 1 for c in commandArgs["Merged_Channels"]] else: mergedIndexes = range(sizeC) # show all mergedIndexes.reverse() # if no colours added, use existing rendering settings. mergedColours = {} # Actually, nicer to always use existing rendering settings. 
# if "Merged_Colours" in commandArgs: # for i, c in enumerate(commandArgs["Merged_Colours"]): # if c in COLOURS: # mergedColours[i] = COLOURS[c] algorithm = ProjectionType.MAXIMUMINTENSITY if "Algorithm" in commandArgs: a = commandArgs["Algorithm"] if (a == "Mean Intensity"): algorithm = ProjectionType.MEANINTENSITY stepping = 1 if "Stepping" in commandArgs: s = commandArgs["Stepping"] if (0 < s < sizeZ): stepping = s scalebar = None if "Scalebar" in commandArgs: sb = commandArgs["Scalebar"] try: scalebar = int(sb) if scalebar <= 0: scalebar = None else: log("Scalebar is %d microns" % scalebar) except: log("Invalid value for scalebar: %s" % str(sb)) scalebar = None overlayColour = (255, 255, 255) if "Scalebar_Colour" in commandArgs: if commandArgs["Scalebar_Colour"] in OVERLAY_COLOURS: r, g, b, a = OVERLAY_COLOURS[commandArgs["Scalebar_Colour"]] overlayColour = (r, g, b) roiZoom = None if "Roi_Zoom" in commandArgs: roiZoom = float(commandArgs["Roi_Zoom"]) if roiZoom == 0: roiZoom = None maxColumns = None if "Max_Columns" in commandArgs: maxColumns = commandArgs["Max_Columns"] showRoiDuration = False if "Show_ROI_Duration" in commandArgs: showRoiDuration = commandArgs["Show_ROI_Duration"] roiLabel = "FigureROI" if "Roi_Selection_Label" in commandArgs: roiLabel = commandArgs["Roi_Selection_Label"] spacer = (width / 50) + 2 print "showRoiDuration", showRoiDuration fig = getSplitView(conn, imageIds, pixelIds, mergedIndexes, mergedColours, width, height, imageLabels, spacer, algorithm, stepping, scalebar, overlayColour, roiZoom, maxColumns, showRoiDuration, roiLabel) # fig.show() # bug-fixing only if fig is None: logMessage = "No figure produced" log("\n" + logMessage) message += logMessage return None, message figLegend = "\n".join(logStrings) # print figLegend # bug fixing only format = commandArgs["Format"] figureName = "movieROIFigure" if "Figure_Name" in commandArgs: figureName = commandArgs["Figure_Name"] figureName = os.path.basename(figureName) output = 
"localfile" if format == 'PNG': output = output + ".png" figureName = figureName + ".png" fig.save(output, "PNG") mimetype = "image/png" elif format == 'TIFF': output = output + ".tiff" figureName = figureName + ".tiff" fig.save(output, "TIFF") mimetype = "image/tiff" else: output = output + ".jpg" figureName = figureName + ".jpg" fig.save(output) mimetype = "image/jpeg" # Use util method to upload the figure 'output' to the server, attaching # it to the omeroImage, adding the # figLegend as the fileAnnotation description. # Returns the id of the originalFileLink child. (ID object, not value) namespace = NSCREATED + "/omero/figure_scripts/Movie_ROI_Figure" fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation( conn, output, omeroImage, output="Movie ROI figure", mimetype=mimetype, ns=namespace, desc=figLegend, origFilePathAndName=figureName) message += faMessage return fileAnnotation, message
def batchImageExport(conn, scriptParams):
    """
    Export the selected images (or all images in the selected datasets) to
    disk as OME-TIFF or as individual planes, zip the results and attach the
    archive (or single ome-tiff) to the first selected object.

    @param conn:         BlitzGateway connection
    @param scriptParams: map of script parameters
    @return:             (fileAnnotation, message)
    """
    # for params with default values, we can get the value directly
    splitCs = scriptParams["Export_Individual_Channels"]
    mergedCs = scriptParams["Export_Merged_Image"]
    greyscale = scriptParams["Individual_Channels_Grey"]
    dataType = scriptParams["Data_Type"]
    ids = scriptParams["IDs"]
    folder_name = scriptParams["Folder_Name"]
    format = scriptParams["Format"]
    projectZ = "Choose_Z_Section" in scriptParams and \
        scriptParams["Choose_Z_Section"] == 'Max projection'

    if (not splitCs) and (not mergedCs):
        log("Not chosen to save Individual Channels OR Merged Image")
        return

    # check if we have these params
    channelNames = []
    if "Channel_Names" in scriptParams:
        channelNames = scriptParams["Channel_Names"]
    imgWidth = None
    if "Image_Width" in scriptParams:
        imgWidth = scriptParams["Image_Width"]

    # functions used below for each imaage.
    def getZrange(sizeZ, scriptParams):
        # Returns None (last-viewed), a 1-tuple (single Z) or a 2-tuple
        # (half-open range) depending on the user's choice.
        zRange = None
        if "Choose_Z_Section" in scriptParams:
            zChoice = scriptParams["Choose_Z_Section"]
            # NB: all Z indices in this script are 1-based
            if zChoice == 'ALL Z planes':
                zRange = (1, sizeZ+1)
            elif "OR_specify_Z_index" in scriptParams:
                zIndex = scriptParams["OR_specify_Z_index"]
                zIndex = min(zIndex, sizeZ)
                zRange = (zIndex,)
            elif "OR_specify_Z_start_AND..." in scriptParams and \
                    "...specify_Z_end" in scriptParams:
                start = scriptParams["OR_specify_Z_start_AND..."]
                start = min(start, sizeZ)
                end = scriptParams["...specify_Z_end"]
                end = min(end, sizeZ)
                zStart = min(start, end)  # in case user got zStart and zEnd mixed up
                zEnd = max(start, end)
                if zStart == zEnd:
                    zRange = (zStart,)
                else:
                    zRange = (zStart, zEnd+1)
        return zRange

    def getTrange(sizeT, scriptParams):
        # Same shape of result as getZrange, but for the T dimension.
        tRange = None
        if "Choose_T_Section" in scriptParams:
            tChoice = scriptParams["Choose_T_Section"]
            # NB: all T indices in this script are 1-based
            if tChoice == 'ALL T planes':
                tRange = (1, sizeT+1)
            elif "OR_specify_T_index" in scriptParams:
                tIndex = scriptParams["OR_specify_T_index"]
                tIndex = min(tIndex, sizeT)
                tRange = (tIndex,)
            elif "OR_specify_T_start_AND..." in scriptParams and \
                    "...specify_T_end" in scriptParams:
                start = scriptParams["OR_specify_T_start_AND..."]
                start = min(start, sizeT)
                end = scriptParams["...specify_T_end"]
                end = min(end, sizeT)
                tStart = min(start, end)  # in case user got zStart and zEnd mixed up
                tEnd = max(start, end)
                if tStart == tEnd:
                    tRange = (tStart,)
                else:
                    tRange = (tStart, tEnd+1)
        return tRange

    # Get the images or datasets
    message = ""
    objects, logMessage = script_utils.getObjects(conn, scriptParams)
    message += logMessage
    if not objects:
        return None, message

    # Attach figure to the first image
    parent = objects[0]

    if dataType == 'Dataset':
        images = []
        for ds in objects:
            images.extend(list(ds.listChildren()))
        if not images:
            message += "No image found in dataset(s)"
            return None, message
    else:
        images = objects

    log("Processing %s images" % len(images))

    # somewhere to put images
    curr_dir = os.getcwd()
    exp_dir = os.path.join(curr_dir, folder_name)
    try:
        os.mkdir(exp_dir)
    except:
        # directory may already exist — best effort
        pass

    # do the saving to disk
    if format == 'OME-TIFF':
        for img in images:
            log("Exporting image as OME-TIFF: %s" % img.getName())
            saveAsOmeTiff(conn, img, folder_name)
    else:
        for img in images:
            log("\n----------- Saving planes from image: '%s' ------------"
                % img.getName())
            sizeC = img.getSizeC()
            sizeZ = img.getSizeZ()
            sizeT = img.getSizeT()
            zRange = getZrange(sizeZ, scriptParams)
            tRange = getTrange(sizeT, scriptParams)
            log("Using:")
            if zRange is None:
                log(" Z-index: Last-viewed")
            elif len(zRange) == 1:
                log(" Z-index: %d" % zRange[0])
            else:
                log(" Z-range: %s-%s" % (zRange[0], zRange[1]-1))
            if projectZ:
                log(" Z-projection: ON")
            if tRange is None:
                log(" T-index: Last-viewed")
            elif len(tRange) == 1:
                log(" T-index: %d" % tRange[0])
            else:
                log(" T-range: %s-%s" % (tRange[0], tRange[1]-1))
            log(" Format: %s" % format)
            if imgWidth is None:
                log(" Image Width: no resize")
            else:
                log(" Image Width: %s" % imgWidth)
            log(" Greyscale: %s" % greyscale)
            log("Channel Rendering Settings:")
            for ch in img.getChannels():
                log(" %s: %d-%d" % (ch.getLabel(), ch.getWindowStart(),
                                    ch.getWindowEnd()))

            savePlanesForImage(conn, img, sizeC, splitCs, mergedCs,
                               channelNames, zRange, tRange, greyscale,
                               imgWidth, projectZ=projectZ, format=format,
                               folder_name=folder_name)

    # write log for exported images (not needed for ome-tiff)
    logFile = open(os.path.join(exp_dir, 'Batch_Image_Export.txt'), 'w')
    try:
        for s in logStrings:
            logFile.write(s)
            logFile.write("\n")
    finally:
        logFile.close()

    # zip everything up (unless we've only got a single ome-tiff)
    if format == 'OME-TIFF' and len(os.listdir(exp_dir)) == 1:
        export_file = os.path.join(folder_name, os.listdir(exp_dir)[0])
        mimetype = 'image/tiff'
    else:
        export_file = "%s.zip" % folder_name
        compress(export_file, folder_name)
        mimetype = 'application/zip'

    namespace = omero.constants.namespaces.NSCREATED + \
        "/omero/export_scripts/Batch_Image_Export"
    fileAnnotation, annMessage = script_utils.createLinkFileAnnotation(
        conn, export_file, parent, output="Batch export zip", ns=namespace,
        mimetype=mimetype)
    message += annMessage

    return fileAnnotation, message
def batchImageExport(conn, scriptParams):
    """
    Export the selected images (or all images in the selected datasets) to
    disk as OME-TIFF or rendered planes, zip the results and attach the
    archive (or single ome-tiff) to the first selected object.

    This variant dedupes images sharing a Pixels object, skips images larger
    than the server's download-as size limit, and supports a Zoom option.

    @param conn:         BlitzGateway connection
    @param scriptParams: map of script parameters
    @return:             (fileAnnotation, message)
    """
    # for params with default values, we can get the value directly
    splitCs = scriptParams["Export_Individual_Channels"]
    mergedCs = scriptParams["Export_Merged_Image"]
    greyscale = scriptParams["Individual_Channels_Grey"]
    dataType = scriptParams["Data_Type"]
    folder_name = scriptParams["Folder_Name"]
    # basename() strips any path components a malicious/odd name could carry
    folder_name = os.path.basename(folder_name)
    format = scriptParams["Format"]
    projectZ = "Choose_Z_Section" in scriptParams and \
        scriptParams["Choose_Z_Section"] == "Max projection"

    if (not splitCs) and (not mergedCs):
        log("Not chosen to save Individual Channels OR Merged Image")
        return

    # check if we have these params
    channelNames = []
    if "Channel_Names" in scriptParams:
        channelNames = scriptParams["Channel_Names"]
    zoomPercent = None
    if "Zoom" in scriptParams and scriptParams["Zoom"] != "100%":
        # e.g. "50%" -> 50
        zoomPercent = int(scriptParams["Zoom"][:-1])

    # functions used below for each imaage.
    def getZrange(sizeZ, scriptParams):
        # Returns None (last-viewed), a 1-tuple (single Z) or a 2-tuple
        # (half-open range) depending on the user's choice.
        zRange = None
        if "Choose_Z_Section" in scriptParams:
            zChoice = scriptParams["Choose_Z_Section"]
            # NB: all Z indices in this script are 1-based
            if zChoice == "ALL Z planes":
                zRange = (1, sizeZ + 1)
            elif "OR_specify_Z_index" in scriptParams:
                zIndex = scriptParams["OR_specify_Z_index"]
                zIndex = min(zIndex, sizeZ)
                zRange = (zIndex,)
            elif "OR_specify_Z_start_AND..." in scriptParams and \
                    "...specify_Z_end" in scriptParams:
                start = scriptParams["OR_specify_Z_start_AND..."]
                start = min(start, sizeZ)
                end = scriptParams["...specify_Z_end"]
                end = min(end, sizeZ)
                # in case user got zStart and zEnd mixed up
                zStart = min(start, end)
                zEnd = max(start, end)
                if zStart == zEnd:
                    zRange = (zStart,)
                else:
                    zRange = (zStart, zEnd + 1)
        return zRange

    def getTrange(sizeT, scriptParams):
        # Same shape of result as getZrange, but for the T dimension.
        tRange = None
        if "Choose_T_Section" in scriptParams:
            tChoice = scriptParams["Choose_T_Section"]
            # NB: all T indices in this script are 1-based
            if tChoice == "ALL T planes":
                tRange = (1, sizeT + 1)
            elif "OR_specify_T_index" in scriptParams:
                tIndex = scriptParams["OR_specify_T_index"]
                tIndex = min(tIndex, sizeT)
                tRange = (tIndex,)
            elif "OR_specify_T_start_AND..." in scriptParams and \
                    "...specify_T_end" in scriptParams:
                start = scriptParams["OR_specify_T_start_AND..."]
                start = min(start, sizeT)
                end = scriptParams["...specify_T_end"]
                end = min(end, sizeT)
                # in case user got zStart and zEnd mixed up
                tStart = min(start, end)
                tEnd = max(start, end)
                if tStart == tEnd:
                    tRange = (tStart,)
                else:
                    tRange = (tStart, tEnd + 1)
        return tRange

    # Get the images or datasets
    message = ""
    objects, logMessage = script_utils.getObjects(conn, scriptParams)
    message += logMessage
    if not objects:
        return None, message

    # Attach figure to the first image
    parent = objects[0]

    if dataType == "Dataset":
        images = []
        for ds in objects:
            images.extend(list(ds.listChildren()))
        if not images:
            message += "No image found in dataset(s)"
            return None, message
    else:
        images = objects

    log("Processing %s images" % len(images))

    # somewhere to put images
    curr_dir = os.getcwd()
    exp_dir = os.path.join(curr_dir, folder_name)
    try:
        os.mkdir(exp_dir)
    except:
        # directory may already exist — best effort
        pass

    # max size (default 12kx12k)
    size = conn.getDownloadAsMaxSizeSetting()
    size = int(size)

    ids = []
    # do the saving to disk
    for img in images:
        pixels = img.getPrimaryPixels()
        # skip images that share a Pixels object we've already exported
        if pixels.getId() in ids:
            continue
        ids.append(pixels.getId())

        sizeX = pixels.getSizeX()
        sizeY = pixels.getSizeY()
        # 'size' is a pixel-count limit: skip "Big" (pyramid) images
        if sizeX * sizeY > size:
            log(" ** Can't export a 'Big' image to %s. **" % format)
            if len(images) == 1:
                return None, "Can't export a 'Big' image to %s." % format
            continue
        else:
            log("Exporting image as %s: %s" % (format, img.getName()))

        if format == "OME-TIFF":
            saveAsOmeTiff(conn, img, folder_name)
        else:
            # NOTE(review): this OME-TIFF warning sits in the non-OME-TIFF
            # branch and only logs — looks misplaced; confirm intent.
            if img._prepareRE().requiresPixelsPyramid():
                log(" ** Can't export a 'Big' image to OME-TIFF. **")

            log("\n----------- Saving planes from image: '%s' ------------"
                % img.getName())
            sizeC = img.getSizeC()
            sizeZ = img.getSizeZ()
            sizeT = img.getSizeT()
            zRange = getZrange(sizeZ, scriptParams)
            tRange = getTrange(sizeT, scriptParams)
            log("Using:")
            if zRange is None:
                log(" Z-index: Last-viewed")
            elif len(zRange) == 1:
                log(" Z-index: %d" % zRange[0])
            else:
                log(" Z-range: %s-%s" % (zRange[0], zRange[1] - 1))
            if projectZ:
                log(" Z-projection: ON")
            if tRange is None:
                log(" T-index: Last-viewed")
            elif len(tRange) == 1:
                log(" T-index: %d" % tRange[0])
            else:
                log(" T-range: %s-%s" % (tRange[0], tRange[1] - 1))
            log(" Format: %s" % format)
            if zoomPercent is None:
                log(" Image Zoom: 100%")
            else:
                log(" Image Zoom: %s" % zoomPercent)
            log(" Greyscale: %s" % greyscale)
            log("Channel Rendering Settings:")
            for ch in img.getChannels():
                log(" %s: %d-%d" % (ch.getLabel(), ch.getWindowStart(),
                                    ch.getWindowEnd()))
            try:
                savePlanesForImage(
                    conn, img, sizeC, splitCs, mergedCs, channelNames,
                    zRange, tRange, greyscale, zoomPercent,
                    projectZ=projectZ, format=format,
                    folder_name=folder_name)
            finally:
                # Make sure we close Rendering Engine
                img._re.close()

    # write log for exported images (not needed for ome-tiff)
    logFile = open(os.path.join(exp_dir, "Batch_Image_Export.txt"), "w")
    try:
        for s in logStrings:
            logFile.write(s)
            logFile.write("\n")
    finally:
        logFile.close()

    if len(os.listdir(exp_dir)) == 0:
        return None, "No files exported. See 'info' for more details"
    # zip everything up (unless we've only got a single ome-tiff)
    if format == "OME-TIFF" and len(os.listdir(exp_dir)) == 1:
        # replace any previously-exported ome-tiff annotations on the parent
        ometiffIds = [t.id for t in parent.listAnnotations(ns=NSOMETIFF)]
        print "Deleting OLD ome-tiffs: %s" % ometiffIds
        conn.deleteObjects("Annotation", ometiffIds)
        export_file = os.path.join(folder_name, os.listdir(exp_dir)[0])
        namespace = NSOMETIFF
        outputDisplayName = "OME-TIFF"
        mimetype = "image/tiff"
    else:
        export_file = "%s.zip" % folder_name
        compress(export_file, folder_name)
        mimetype = "application/zip"
        outputDisplayName = "Batch export zip"
        namespace = NSCREATED + "/omero/export_scripts/Batch_Image_Export"

    fileAnnotation, annMessage = script_utils.createLinkFileAnnotation(
        conn, export_file, parent, output=outputDisplayName, ns=namespace,
        mimetype=mimetype
    )
    message += annMessage
    return fileAnnotation, message
def processImages(conn, scriptParams): fileAnns = [] message ="" # Get the images images, logMessage = scriptUtil.getObjects(conn, scriptParams) message += logMessage if not images: return None, message # Check for line and polyline ROIs and filter images list images = [image for image in images if image.getROICount(["Polyline","Line"])>0] if not images: message += "No ROI containing line or polyline was found." return None, message for image in images: print "\nAnalysing Image: %s ID: %s" % (image.getName(), image.getId()) if image.getSizeT() > 1: message += "%s ID: %s appears to be a time-lapse Image, not a kymograph." % (image.getName(), image.getId()) continue roiService = conn.getRoiService() result = roiService.findByImage(image.getId(), None) secsPerPixelY = image.getPixelSizeY() micronsPerPixelX = image.getPixelSizeX() if secsPerPixelY and micronsPerPixelX: micronsPerSec = micronsPerPixelX / secsPerPixelY else: micronsPerSec = None # for each line or polyline, create a row in csv table: y(t), x, dy(dt), dx, x/t (line), x/t (average) colNames = "\nt_start (pixels), x_start (pixels), t_end (pixels), x_end (pixels), dt (pixels), dx (pixels), x/t, average x/t, speed(um/sec)" tableData = "" for roi in result.rois: for s in roi.copyShapes(): if type(s) == omero.model.LineI: tableData += "\nLine" x1 = s.getX1().getValue() x2 = s.getX2().getValue() y1 = s.getY1().getValue() y2 = s.getY2().getValue() dx = abs(x1-x2) dy = abs(y1-y2) dxPerY = float(dx)/dy tableData += "\n" tableData += ",".join([str(x) for x in (y1, x1, y2, x2, dy, dx, dxPerY, dxPerY, "")]) if micronsPerSec: speed = dxPerY * micronsPerSec tableData += "%s" % speed elif type(s) == omero.model.PolylineI: tableData += "\nPolyline" points = pointsStringToXYlist(s.getPoints().getValue()) xStart, yStart = points[0] for i in range(1, len(points)): x1, y1 = points[i-1] x2, y2 = points[i] dx = abs(x1-x2) dy = abs(y1-y2) dxPerY = float(dx)/dy avXperY = abs(float(x2-xStart)/(y2-yStart)) tableData += "\n" 
tableData += ",".join([str(x) for x in (y1, x1, y2, x2, dy, dx, dxPerY, avXperY, "")]) if micronsPerSec: speed = dxPerY * micronsPerSec tableData += "%s" % speed # write table data to csv... if len(tableData) > 0: tableString = "secsPerPixelY: %s" % secsPerPixelY tableString += '\nmicronsPerPixelX: %s' % micronsPerPixelX tableString += "\nmicronsPerSec: %s" % micronsPerSec tableString += "\n" tableString += colNames tableString += tableData print tableString csvFileName = 'kymograph_velocities_%s.csv' % image.getId() csvFile = open(csvFileName, 'w') try: csvFile.write(tableString) finally: csvFile.close() fileAnn, faMessage = scriptUtil.createLinkFileAnnotation(conn, csvFileName, image, output="Line Plot csv (Excel) file", mimetype="text/csv", desc=None) print fileAnn, faMessage if fileAnn: fileAnns.append(fileAnn) else: print "Found NO lines or polylines to analyse for Image" if not fileAnns: faMessage = "No Analysis files created. See 'Info' or 'Error' for more details" elif len(fileAnns) > 1: faMessage = "Created %s csv (Excel) files" % len(fileAnns) message += faMessage return fileAnns, message
def processImages(conn, scriptParams):
    """
    Analyse Line and Polyline ROIs on kymograph images, writing a velocity
    table to a csv file that is attached to each image.

    Kymograph convention (grounded in the pixel-size usage below): Y axis is
    time, X axis is distance, so velocity is dx/dy scaled by pixel sizes.

    @param conn:         BlitzGateway connection
    @param scriptParams: script parameter map identifying the images
    @return:             (list of created file annotations, client message)
    """
    fileAnns = []
    message = ""
    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, scriptParams)
    message += logMessage
    if not images:
        return None, message
    # Check for line and polyline ROIs and filter images list
    images = [
        image for image in images
        if image.getROICount(["Polyline", "Line"]) > 0
    ]
    if not images:
        message += "No ROI containing line or polyline was found."
        return None, message
    for image in images:
        print "\nAnalysing Image: %s ID: %s" \
            % (image.getName(), image.getId())
        # A kymograph is a single-timepoint image (time is the Y axis)
        if image.getSizeT() > 1:
            message += "%s ID: %s appears to be a time-lapse Image," \
                " not a kymograph." % (image.getName(), image.getId())
            continue
        roiService = conn.getRoiService()
        result = roiService.findByImage(image.getId(), None)
        # Y pixel size is seconds, X pixel size is microns (kymograph axes)
        secsPerPixelY = image.getPixelSizeY()
        micronsPerPixelX = image.getPixelSizeX()
        if secsPerPixelY and micronsPerPixelX:
            micronsPerSec = micronsPerPixelX / secsPerPixelY
        else:
            # missing pixel sizes: speed column is left blank below
            micronsPerSec = None
        # for each line or polyline, create a row in csv table: y(t), x,
        # dy(dt), dx, x/t (line), x/t (average)
        colNames = "\nt_start (pixels), x_start (pixels), t_end (pixels)," \
            " x_end (pixels), dt (pixels), dx (pixels), x/t, average x/t," \
            " speed(um/sec)"
        tableData = ""
        for roi in result.rois:
            for s in roi.copyShapes():
                if type(s) == omero.model.LineI:
                    tableData += "\nLine"
                    x1 = s.getX1().getValue()
                    x2 = s.getX2().getValue()
                    y1 = s.getY1().getValue()
                    y2 = s.getY2().getValue()
                    dx = abs(x1 - x2)
                    dy = abs(y1 - y2)
                    # NOTE(review): dy == 0 (horizontal line) raises
                    # ZeroDivisionError here -- confirm inputs exclude it
                    dxPerY = float(dx) / dy
                    tableData += "\n"
                    tableData += ",".join([
                        str(x) for x in
                        (y1, x1, y2, x2, dy, dx, dxPerY, dxPerY, "")
                    ])
                    if micronsPerSec:
                        speed = dxPerY * micronsPerSec
                        tableData += "%s" % speed
                elif type(s) == omero.model.PolylineI:
                    tableData += "\nPolyline"
                    points = pointsStringToXYlist(s.getPoints().getValue())
                    xStart, yStart = points[0]
                    # one row per polyline segment
                    for i in range(1, len(points)):
                        x1, y1 = points[i - 1]
                        x2, y2 = points[i]
                        dx = abs(x1 - x2)
                        dy = abs(y1 - y2)
                        # NOTE(review): dy == 0 or y2 == yStart raises
                        # ZeroDivisionError in the two divisions below
                        dxPerY = float(dx) / dy
                        # average velocity measured from the polyline start
                        avXperY = abs(float(x2 - xStart) / (y2 - yStart))
                        tableData += "\n"
                        tableData += ",".join([
                            str(x) for x in
                            (y1, x1, y2, x2, dy, dx, dxPerY, avXperY, "")
                        ])
                        if micronsPerSec:
                            speed = dxPerY * micronsPerSec
                            tableData += "%s" % speed
        # write table data to csv...
        if len(tableData) > 0:
            tableString = "secsPerPixelY: %s" % secsPerPixelY
            tableString += '\nmicronsPerPixelX: %s' % micronsPerPixelX
            tableString += "\nmicronsPerSec: %s" % micronsPerSec
            tableString += "\n"
            tableString += colNames
            tableString += tableData
            print tableString
            csvFileName = 'kymograph_velocities_%s.csv' % image.getId()
            csvFile = open(csvFileName, 'w')
            try:
                csvFile.write(tableString)
            finally:
                csvFile.close()
            fileAnn, faMessage = scriptUtil.createLinkFileAnnotation(
                conn, csvFileName, image,
                output="Line Plot csv (Excel) file",
                mimetype="text/csv", desc=None)
            print fileAnn, faMessage
            if fileAnn:
                fileAnns.append(fileAnn)
        else:
            print "Found NO lines or polylines to analyse for Image"
    # If exactly one file was created, faMessage keeps the message from
    # createLinkFileAnnotation set inside the loop.
    if not fileAnns:
        faMessage = "No Analysis files created. See 'Info' or 'Error'" \
            " for more details"
    elif len(fileAnns) > 1:
        faMessage = "Created %s csv (Excel) files" % len(fileAnns)
    message += faMessage
    return fileAnns, message
def roiFigure(conn, commandArgs): """ This processes the script parameters, adding defaults if needed. Then calls a method to make the figure, and finally uploads and attaches this to the primary image. @param: session The OMERO session @param: commandArgs Map of String:Object parameters for the script. Objects are not rtypes, since getValue() was called when the map was processed below. But, list and map objects may contain rtypes (need to call getValue()) @return: the id of the originalFileLink child. (ID object, not value) """ log("ROI figure created by OMERO on %s" % date.today()) log("") message = "" # message to be returned to the client pixelIds = [] imageIds = [] imageLabels = [] # function for getting image labels. def getImageNames(fullName, tagsList, pdList): name = fullName.split("/")[-1] return [name] # default function for getting labels is getName (or use datasets / tags) if "Image_Labels" in commandArgs: if commandArgs["Image_Labels"] == "Datasets": def getDatasets(name, tagsList, pdList): return [dataset for project, dataset in pdList] getLabels = getDatasets elif commandArgs["Image_Labels"] == "Tags": def getTags(name, tagsList, pdList): return tagsList getLabels = getTags else: getLabels = getImageNames else: getLabels = getImageNames # Get the images images, logMessage = scriptUtil.getObjects(conn, commandArgs) message += logMessage if not images: return None, message # Check for rectangular ROIs and filter images list images = [image for image in images if image.getROICount("Rectangle") > 0] if not images: message += "No rectangle ROI found." return None, message # Attach figure to the first image omeroImage = images[0] # process the list of images. If imageIds is not set, script can't run. log("Image details:") for image in images: imageIds.append(image.getId()) pixelIds.append(image.getPrimaryPixels().getId()) # a map of imageId : list of (project, dataset) names. 
pdMap = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(), imageIds) tagMap = figUtil.getTagsFromImages(conn.getMetadataService(), imageIds) # Build a legend entry for each image for image in images: name = image.getName() imageDate = image.getAcquisitionDate() iId = image.getId() tagsList = tagMap[iId] pdList = pdMap[iId] tags = ", ".join(tagsList) pdString = ", ".join(["%s/%s" % pd for pd in pdList]) log(" Image: %s ID: %d" % (name, iId)) if imageDate: log(" Date: %s" % imageDate) else: log(" Date: not set") log(" Tags: %s" % tags) log(" Project/Datasets: %s" % pdString) imageLabels.append(getLabels(name, tagsList, pdList)) # use the first image to define dimensions, channel colours etc. sizeX = omeroImage.getSizeX() sizeY = omeroImage.getSizeY() sizeZ = omeroImage.getSizeZ() sizeC = omeroImage.getSizeC() width = sizeX if "Width" in commandArgs: w = commandArgs["Width"] try: width = int(w) except: log("Invalid width: %s Using default value: %d" % (str(w), sizeX)) height = sizeY if "Height" in commandArgs: h = commandArgs["Height"] try: height = int(h) except: log("Invalid height: %s Using default value" % (str(h), sizeY)) log("Image dimensions for all panels (pixels): width: %d height: %d" % (width, height)) mergedIndexes = [] # the channels in the combined image, mergedColours = {} if "Merged_Colours" in commandArgs: cColourMap = commandArgs["Merged_Colours"] for c in cColourMap: rgb = cColourMap[c] try: rgb = int(rgb) cIndex = int(c) except ValueError: print "Merged_Colours map should be index:rgbInt. Not %s:%s" \ % (c, rgb) continue rgba = imgUtil.RGBIntToRGBA(rgb) mergedColours[cIndex] = rgba mergedIndexes.append(cIndex) mergedIndexes.sort() # make sure we have some merged channels if len(mergedIndexes) == 0: mergedIndexes = range(sizeC) mergedIndexes.reverse() mergedNames = False if "Merged_Names" in commandArgs: mergedNames = commandArgs["Merged_Names"] # Make channel-names map. 
If argument wasn't specified, name by index channelNames = {} if "Channel_Names" in commandArgs: cNameMap = commandArgs["Channel_Names"] for c in range(sizeC): if str(c) in cNameMap: channelNames[c] = cNameMap[str(c)] else: channelNames[c] = str(c) else: for c in range(sizeC): channelNames[c] = str(c) # Make split-indexes list. If no "Split_Indexes", show none: # http://www.openmicroscopy.org/community/viewtopic.php?f=4&t=940 splitIndexes = [] if "Split_Indexes" in commandArgs: for index in commandArgs["Split_Indexes"]: splitIndexes.append(index) colourChannels = True if "Split_Panels_Grey" in commandArgs and commandArgs["Split_Panels_Grey"]: colourChannels = False algorithm = ProjectionType.MAXIMUMINTENSITY if "Algorithm" in commandArgs: a = commandArgs["Algorithm"] if (a == "Mean Intensity"): algorithm = ProjectionType.MEANINTENSITY stepping = 1 if "Stepping" in commandArgs: s = commandArgs["Stepping"] if (0 < s < sizeZ): stepping = s scalebar = None if "Scalebar" in commandArgs: sb = commandArgs["Scalebar"] try: scalebar = int(sb) if scalebar <= 0: scalebar = None else: log("Scalebar is %d microns" % scalebar) except: log("Invalid value for scalebar: %s" % str(sb)) scalebar = None overlayColour = (255, 255, 255) if "Overlay_Colour" in commandArgs: r, g, b, a = OVERLAY_COLOURS[commandArgs["Overlay_Colour"]] overlayColour = (r, g, b) roiZoom = None if "ROI_Zoom" in commandArgs: roiZoom = float(commandArgs["ROI_Zoom"]) if roiZoom == 0: roiZoom = None roiLabel = "FigureROI" if "ROI_Label" in commandArgs: roiLabel = commandArgs["ROI_Label"] spacer = (width/50) + 2 fig = getSplitView( conn, imageIds, pixelIds, splitIndexes, channelNames, mergedNames, colourChannels, mergedIndexes, mergedColours, width, height, imageLabels, spacer, algorithm, stepping, scalebar, overlayColour, roiZoom, roiLabel) if fig is None: logMessage = "No figure produced" log("\n"+logMessage) message += logMessage return None, message # fig.show() # bug-fixing only log("") figLegend = 
"\n".join(logStrings) # print figLegend # bug fixing only format = commandArgs["Format"] figureName = "roiFigure" if "Figure_Name" in commandArgs: figureName = commandArgs["Figure_Name"] figureName = os.path.basename(figureName) output = "localfile" if format == 'PNG': output = output + ".png" figureName = figureName + ".png" fig.save(output, "PNG") mimetype = "image/png" elif format == 'TIFF': output = output + ".tiff" figureName = figureName + ".tiff" fig.save(output, "TIFF") mimetype = "image/tiff" else: output = output + ".jpg" figureName = figureName + ".jpg" fig.save(output) mimetype = "image/jpeg" # Use util method to upload the figure 'output' to the server, attaching # it to the omeroImage, adding the # figLegend as the fileAnnotation description. # Returns the id of the originalFileLink child. (ID object, not value) namespace = NSCREATED + "/omero/figure_scripts/ROI_Split_Figure" fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation( conn, output, omeroImage, output="ROI Split figure", mimetype=mimetype, ns=namespace, desc=figLegend, origFilePathAndName=figureName) message += faMessage return fileAnnotation, message
def makeThumbnailFigure(conn, scriptParams):
    """
    Makes the figure using the parameters in @scriptParams, attaches the
    figure to the parent Project/Dataset, and returns the file-annotation ID

    @ returns Returns the id of the originalFileLink child. (ID object,
    not value)
    """
    log("Thumbnail figure created by OMERO")
    log("")
    message = ""
    # Get the objects (images or datasets)
    objects, logMessage = scriptUtil.getObjects(conn, scriptParams)
    message += logMessage
    if not objects:
        return None, message
    # Get parent: an explicit Parent_ID only applies when several IDs were
    # given; Images attach to a Dataset, Datasets to a Project
    parent = None
    if "Parent_ID" in scriptParams and len(scriptParams["IDs"]) > 1:
        if scriptParams["Data_Type"] == "Image":
            parent = conn.getObject("Dataset", scriptParams["Parent_ID"])
        else:
            parent = conn.getObject("Project", scriptParams["Parent_ID"])
    if parent is None:
        parent = objects[0]  # Attach figure to the first object
    parentClass = parent.OMERO_CLASS
    log("Figure will be linked to %s%s: %s"
        % (parentClass[0].lower(), parentClass[1:], parent.getName()))
    tagIds = []
    if "Tag_IDs" in scriptParams:
        tagIds = scriptParams['Tag_IDs']
    if len(tagIds) == 0:
        tagIds = None
    showUntagged = False
    if (tagIds):
        showUntagged = scriptParams["Show_Untagged_Images"]
    thumbSize = scriptParams["Thumbnail_Size"]
    maxColumns = scriptParams["Max_Columns"]
    figHeight = 0
    figWidth = 0
    dsCanvases = []
    # Paint one canvas per dataset (or one canvas for a plain image list),
    # stacking them vertically; track the overall figure size as we go.
    if scriptParams["Data_Type"] == "Dataset":
        for dataset in objects:
            log("Dataset: %s ID: %d" % (dataset.getName(), dataset.getId()))
            images = list(dataset.listChildren())
            title = dataset.getName().decode('utf8')
            dsCanvas = paintDatasetCanvas(
                conn, images, title, tagIds, showUntagged, length=thumbSize,
                colCount=maxColumns)
            if dsCanvas is None:
                continue
            dsCanvases.append(dsCanvas)
            figHeight += dsCanvas.size[1]
            figWidth = max(figWidth, dsCanvas.size[0])
    else:
        imageCanvas = paintDatasetCanvas(
            conn, objects, "", tagIds, showUntagged, length=thumbSize,
            colCount=maxColumns)
        dsCanvases.append(imageCanvas)
        figHeight += imageCanvas.size[1]
        figWidth = max(figWidth, imageCanvas.size[0])
    if len(dsCanvases) == 0:
        message += "No figure created"
        return None, message
    # Paste the per-dataset canvases top-to-bottom onto one white figure
    figure = Image.new("RGB", (figWidth, figHeight), WHITE)
    y = 0
    for ds in dsCanvases:
        imgUtil.pasteImage(ds, figure, 0, y)
        y += ds.size[1]
    log("")
    # NOTE(review): uses module-level 'logLines' here, while sibling
    # functions join 'logStrings' -- confirm both exist at module scope
    figLegend = "\n".join(logLines)
    format = scriptParams["Format"]
    figureName = scriptParams["Figure_Name"]
    figureName = os.path.basename(figureName)
    # Save locally under a fixed name; the user-visible name keeps the
    # requested Figure_Name plus the format extension
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figureName = figureName + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figureName = figureName + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figureName = figureName + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"
    # Upload the figure and link it to the parent, with the log as the
    # annotation description
    namespace = NSCREATED + "/omero/figure_scripts/Thumbnail_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, parent, output="Thumbnail figure", mimetype=mimetype,
        ns=namespace, desc=figLegend, origFilePathAndName=figureName)
    message += faMessage
    return fileAnnotation, message
def writeMovie(commandArgs, conn):
    """
    Makes the movie.

    Renders each requested (t, z) plane to an image file on disk, optionally
    decorating it (scalebar, time stamp, plane info, watermark, intro/outro
    slides), then assembles the frames with buildAVI and uploads the result.

    @ returns Returns the file annotation
    """
    log("Movie created by OMERO")
    log("")
    message = ""
    session = conn.c.sf
    updateService = session.getUpdateService()
    rawFileStore = session.createRawFileStore()
    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, commandArgs)
    message += logMessage
    if not images:
        return None, message
    # Get the first valid image (should be expanded to process the list)
    omeroImage = images[0]
    # Use a specific rendering def if one was requested (>= 0)
    if commandArgs["RenderingDef_ID"] >= 0:
        omeroImage._prepareRenderingEngine(
            rdid=commandArgs["RenderingDef_ID"])
    pixels = omeroImage.getPrimaryPixels()
    pixelsId = pixels.getId()
    sizeX = pixels.getSizeX()
    sizeY = pixels.getSizeY()
    sizeZ = pixels.getSizeZ()
    sizeC = pixels.getSizeC()
    sizeT = pixels.getSizeT()
    # NOTE(review): this early exit returns None, not a (result, message)
    # tuple like the other paths -- confirm callers handle that
    if (sizeX is None or sizeY is None or sizeZ is None or sizeT is None
            or sizeC is None):
        return
    # no physical pixel size means a scalebar would be meaningless
    if (pixels.getPhysicalSizeX() is None):
        commandArgs["Scalebar"] = 0
    cRange = range(0, sizeC)
    cWindows = None
    cColours = None
    # "ChannelsExtended" entries look like index[|start:end][$colour],
    # e.g. "1|100:255$FF0000"; channel indices are 1-based in the input
    if "ChannelsExtended" in commandArgs and \
            validChannels(commandArgs["ChannelsExtended"], sizeC):
        cRange = []
        cWindows = []
        cColours = []
        for c in commandArgs["ChannelsExtended"]:
            m = re.match('^(?P<i>\d+)(\|(?P<ws>\d+)' +
                         '\:(?P<we>\d+))?(\$(?P<c>.+))?$', c)
            if m is not None:
                cRange.append(int(m.group('i'))-1)
                cWindows.append([float(m.group('ws')), float(m.group('we'))])
                cColours.append(m.group('c'))
    elif "Channels" in commandArgs and \
            validChannels(commandArgs["Channels"], sizeC):
        cRange = commandArgs["Channels"]
    # ordered list of (t, z) planes to render
    tzList = calculateRanges(sizeZ, sizeT, commandArgs)
    timeMap = calculateAquisitionTime(conn, pixelsId, cRange, tzList)
    # without acquisition times we cannot overlay time stamps
    if (timeMap is None):
        commandArgs["Show_Time"] = False
    if (timeMap is not None):
        if (len(timeMap) == 0):
            commandArgs["Show_Time"] = False
    frameNo = 1
    # setActiveChannels expects 1-based channel indices
    omeroImage.setActiveChannels(map(lambda x: x+1, cRange), cWindows,
                                 cColours)
    renderingEngine = omeroImage._re
    overlayColour = (255, 255, 255)
    if "Overlay_Colour" in commandArgs:
        r, g, b, a = COLOURS[commandArgs["Overlay_Colour"]]
        overlayColour = (r, g, b)
    canvasColour = tuple(COLOURS[commandArgs["Canvas_Colour"]][:3])
    # enforce minimum movie dimensions; pad onto a canvas if needed
    mw = commandArgs["Min_Width"]
    if mw < sizeX:
        mw = sizeX
    mh = commandArgs["Min_Height"]
    if mh < sizeY:
        mh = sizeY
    ovlpos = None
    canvas = None
    if sizeX < mw or sizeY < mh:
        # centre the rendered plane on the padding canvas
        ovlpos = ((mw-sizeX) / 2, (mh-sizeY) / 2)
        canvas = Image.new("RGBA", (mw, mh), canvasColour)
    format = commandArgs["Format"]
    fileNames = []
    # add intro...
    if "Intro_Slide" in commandArgs and commandArgs["Intro_Slide"].id:
        intro_duration = commandArgs["Intro_Duration"]
        intro_fileId = commandArgs["Intro_Slide"].id.val
        intro_filenames = write_intro_end_slides(
            conn, commandArgs, intro_fileId, intro_duration, mw, mh)
        fileNames.extend(intro_filenames)
    # prepare watermark
    if "Watermark" in commandArgs and commandArgs["Watermark"].id:
        watermark = prepareWatermark(conn, commandArgs, mw, mh)
    # add movie frames...
    for tz in tzList:
        t = tz[0]
        z = tz[1]
        plane = getPlane(renderingEngine, z, t)
        planeImage = numpy.array(plane, dtype='uint32')
        # rendered data arrives big-endian ARGB; swap to native byte order
        planeImage = planeImage.byteswap()
        # NOTE(review): reshape(sizeX, sizeY) looks transposed for
        # non-square images (rows are usually Y) -- confirm with a
        # non-square test image
        planeImage = planeImage.reshape(sizeX, sizeY)
        image = Image.frombuffer('RGBA', (sizeX, sizeY), planeImage.data,
                                 'raw', 'ARGB', 0, 1)
        if ovlpos is not None:
            # paste frame onto the padding canvas, centred
            image2 = canvas.copy()
            image2.paste(image, ovlpos, image)
            image = image2
        if "Scalebar" in commandArgs and commandArgs["Scalebar"]:
            image = addScalebar(
                commandArgs["Scalebar"], image, pixels, commandArgs)
        # key used to look up the acquisition time for this plane
        planeInfo = "z:"+str(z)+"t:"+str(t)
        if "Show_Time" in commandArgs and commandArgs["Show_Time"]:
            time = timeMap[planeInfo]
            image = addTimePoints(time, pixels, image, overlayColour)
        if "Show_Plane_Info" in commandArgs and \
                commandArgs["Show_Plane_Info"]:
            image = addPlaneInfo(z, t, pixels, image, overlayColour)
        if "Watermark" in commandArgs and commandArgs["Watermark"].id:
            image = pasteWatermark(image, watermark)
        # QuickTime frames are saved lossless as PNG, others as JPEG
        if format == QT:
            filename = str(frameNo) + '.png'
            image.save(filename, "PNG")
        else:
            filename = str(frameNo) + '.jpg'
            image.save(filename, "JPEG")
        fileNames.append(filename)
        frameNo += 1
    # add exit frames... "outro"
    if "Ending_Slide" in commandArgs and commandArgs["Ending_Slide"].id:
        end_duration = commandArgs["Ending_Duration"]
        end_fileId = commandArgs["Ending_Slide"].id.val
        end_filenames = write_intro_end_slides(
            conn, commandArgs, end_fileId, end_duration, mw, mh)
        fileNames.extend(end_filenames)
    filelist = ",".join(fileNames)
    ext = formatMap[format]
    movieName = "Movie"
    if "Movie_Name" in commandArgs:
        movieName = commandArgs["Movie_Name"]
        movieName = os.path.basename(movieName)
    if not movieName.endswith(".%s" % ext):
        movieName = "%s.%s" % (movieName, ext)
    # spaces etc in file name cause problems
    movieName = re.sub("[$&\;|\(\)<>' ]", "", movieName)
    framesPerSec = 2
    if "FPS" in commandArgs:
        framesPerSec = commandArgs["FPS"]
    output = "localfile.%s" % ext
    buildAVI(mw, mh, filelist, framesPerSec, output, format)
    mimetype = formatMimetypes[format]
    if not os.path.exists(output):
        print "mencoder Failed to create movie file: %s" % output
        return None, "Failed to create movie file: %s" % output
    # Do_Link False: upload the file without linking it to the image
    if not commandArgs["Do_Link"]:
        originalFile = scriptUtil.createFile(
            updateService, output, mimetype, movieName)
        scriptUtil.uploadFile(rawFileStore, originalFile, movieName)
        return originalFile, message
    namespace = NSCREATED + "/omero/export_scripts/Make_Movie"
    fileAnnotation, annMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, omeroImage, ns=namespace, mimetype=mimetype,
        origFilePathAndName=movieName)
    message += annMessage
    # NOTE(review): returns the underlying model object (_obj), unlike the
    # wrapper returned on the Do_Link=False path above
    return fileAnnotation._obj, message
def splitViewFigure(conn, scriptParams):
    """
    Processes the arguments, populating defaults if necessary. Prints the
    details to log (fig-legend). Even handles missing arguments that are not
    optional (from when this ran from commandline with everything optional)
    then calls makeSplitViewFigure() to make the figure, attaches it to the
    Image as an 'originalFile' annotation, with fig-legend as the
    description.

    @return: the id of the originalFileLink child. (ID object, not value)
    """
    log("Split-View figure created by OMERO on %s" % date.today())
    log("")
    message = ""  # message to be returned to the client
    imageIds = []
    pixelIds = []
    imageLabels = []

    # function for getting image labels.
    def getLabels(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if scriptParams["Image_Labels"] == "Datasets":
        def getDatasets(name, tagsList, pdList):
            return [dataset for project, dataset in pdList]
        getLabels = getDatasets
    elif scriptParams["Image_Labels"] == "Tags":
        def getTags(name, tagsList, pdList):
            return tagsList
        getLabels = getTags

    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, scriptParams)
    message += logMessage
    if not images:
        return None, message

    # Attach figure to the first image
    omeroImage = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        imageIds.append(image.getId())
        pixelIds.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pdMap = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(),
                                                  imageIds)
    tagMap = figUtil.getTagsFromImages(conn.getMetadataService(), imageIds)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        imageDate = image.getAcquisitionDate()
        iId = image.getId()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]
        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s ID: %d" % (name, iId))
        # acquisition date is in milliseconds since the epoch
        log(" Date: %s" % date.fromtimestamp(imageDate/1000))
        log(" Tags: %s" % tags)
        log(" Project/Datasets: %s" % pdString)
        imageLabels.append(getLabels(name, tagsList, pdList))

    # use the first image to define dimensions, channel colours etc.
    sizeX = omeroImage.getSizeX()
    sizeY = omeroImage.getSizeY()
    sizeZ = omeroImage.getSizeZ()
    sizeC = omeroImage.getSizeC()

    # set image dimensions; -1 means "use the default projection range"
    zStart = -1
    zEnd = -1
    if "Z_Start" in scriptParams:
        zStart = scriptParams["Z_Start"]
    if "Z_End" in scriptParams:
        zEnd = scriptParams["Z_End"]

    # NOTE(review): 'and/or' idiom -- a Width/Height value of 0 would fall
    # back to the image size
    width = "Width" in scriptParams and scriptParams["Width"] or sizeX
    height = "Height" in scriptParams and scriptParams["Height"] or sizeY

    log("Image dimensions for all panels (pixels): width: %d height: %d"
        % (width, height))

    # Make split-indexes list. If argument wasn't specified, include them all.
    splitIndexes = []
    if "Split_Indexes" in scriptParams:
        splitIndexes = scriptParams["Split_Indexes"]
    else:
        splitIndexes = range(sizeC)

    # Make channel-names map. If argument wasn't specified, name by index
    channelNames = {}
    for c in range(sizeC):
        channelNames[c] = str(c)
    if "Channel_Names" in scriptParams:
        cNameMap = scriptParams["Channel_Names"]
        for c in cNameMap:
            index = int(c)
            channelNames[index] = cNameMap[c]

    mergedIndexes = []  # the channels in the combined image,
    mergedColours = {}
    if "Merged_Colours" in scriptParams:
        cColourMap = scriptParams["Merged_Colours"]
        for c in cColourMap:
            # map channel index -> RGBA tuple decoded from the packed int
            rgb = cColourMap[c]
            rgba = imgUtil.RGBIntToRGBA(rgb)
            mergedColours[int(c)] = rgba
            mergedIndexes.append(int(c))
        mergedIndexes.sort()
    else:
        mergedIndexes = range(sizeC)

    colourChannels = not scriptParams["Split_Panels_Grey"]

    algorithm = omero.constants.projection.ProjectionType.MAXIMUMINTENSITY
    if "Mean Intensity" == scriptParams["Algorithm"]:
        algorithm = omero.constants.projection.ProjectionType.MEANINTENSITY

    # never step further than the stack depth
    stepping = min(scriptParams["Stepping"], sizeZ)

    scalebar = None
    if "Scalebar" in scriptParams:
        scalebar = scriptParams["Scalebar"]
        log("Scalebar is %d microns" % scalebar)

    r, g, b, a = OVERLAY_COLOURS[scriptParams["Overlay_Colour"]]
    overlayColour = (r, g, b)

    mergedNames = scriptParams["Merged_Names"]

    print "splitIndexes", splitIndexes
    print "channelNames", channelNames
    print "colourChannels", colourChannels
    print "mergedIndexes", mergedIndexes
    print "mergedColours", mergedColours
    print "mergedNames", mergedNames

    fig = makeSplitViewFigure(
        conn, pixelIds, zStart, zEnd, splitIndexes, channelNames,
        colourChannels, mergedIndexes, mergedColours, mergedNames, width,
        height, imageLabels, algorithm, stepping, scalebar, overlayColour)

    figLegend = "\n".join(logStrings)

    format = JPEG
    if scriptParams["Format"] == "PNG":
        format = PNG

    # save the figure locally using the requested figure name
    output = scriptParams["Figure_Name"]
    if format == PNG:
        output = output + ".png"
        fig.save(output, "PNG")
        mimetype = "image/png"
    else:
        output = output + ".jpg"
        fig.save(output)
        mimetype = "image/jpeg"

    # Upload the figure 'output' to the server, creating a file annotation
    # and attaching it to the omeroImage, adding the figLegend as the
    # fileAnnotation description.
    namespace = omero.constants.namespaces.NSCREATED + \
        "/omero/figure_scripts/Split_View_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, omeroImage, output="Split view figure",
        mimetype=mimetype, ns=namespace, desc=figLegend)
    message += faMessage

    return fileAnnotation, message
def run_processing(conn, script_params):
    """
    Run near-neighbour analysis on localisation data attached to an image.

    Downloads the csv/txt file annotation, parses the localisations, computes
    near-neighbour histograms per ROI via process_data, writes them to a csv
    and attaches that csv back to the image.

    @param conn:          BlitzGateway connection
    @param script_params: script parameter map (ImageID, AnnotationID,
                          File_Type, conversion options)
    @return:              message string for the client
    """
    file_anns = []
    message = ""
    imageIds = []
    image_id = script_params['ImageID']
    imageIds.append(image_id)
    image = conn.getObject("Image", image_id)
    if not image:
        message = 'Could not find specified image'
        return message
    file_id = script_params['AnnotationID']
    ann = conn.getObject("Annotation", file_id)
    if not ann:
        message = 'Could not find specified annotation'
        return message
    # other parameters: optional conversion of coords from camera pixels
    if script_params['Convert_coordinates_to_nm']:
        cam_pix_size = script_params['Parent_Image_Pixel_Size']
    else:
        cam_pix_size = 1
    file_type = FILE_TYPES[script_params['File_Type']]
    path_to_ann = ann.getFile().getPath() + '/' + ann.getFile().getName()
    name, ext = os.path.splitext(path_to_ann)
    if ('txt' in ext) or ('csv' in ext):
        # get the path to the downloaded data
        path_to_data = download_data(ann)
        # get all the xy coords in that data (one entry per channel)
        locs = parse_sr_data(path_to_data, file_type, cam_pix_size)
        sizeC = len(locs)
        # get the rois to be processed
        rectangles = get_rectangles(conn, image_id)
        # calculate near neighbour distances
        nn_hist, bins = process_data(conn, image, file_type, sizeC,
                                     rectangles, locs)
        print 'nn_hist:', len(nn_hist), nn_hist[0].shape
        print 'bins:', bins.shape
        # put the data in an omero table
        # put_data_in_table(conn,imageIds,rectIds,nn_data,nn_hist,bins)
        # write the data to a csv: bins column followed by one histogram
        # column per ROI, one section per channel
        file_name = "near_neighbours_" + ann.getFile().getName()[:-4] + '.csv'
        with file(file_name, 'w') as outfile:
            outfile.write('# nearest neighbour data for %s channels and %s ROIs: \n' % (len(nn_hist), nn_hist[0].shape[1] ))
            for c, channel in enumerate(nn_hist):
                data = np.concatenate((bins, channel), axis=1)
                np.savetxt(outfile, data, fmt='%-7.2f', delimiter=',',
                           newline='\n')
                # NOTE(review): this marker is written AFTER channel c's
                # data, and only for c > 0 -- confirm the intended placement
                if c > 0:
                    outfile.write('# New channel\n')
        new_file_ann, faMessage = script_util.createLinkFileAnnotation(
            conn, file_name, image,
            output="Wrote near neighbour csv (Excel) file",
            mimetype="text/csv", desc=None)
        if new_file_ann:
            file_anns.append(new_file_ann)
        if not file_anns:
            faMessage = "No Analysis files created. See 'Info' or 'Error' for"\
                " more details"
        elif len(file_anns) > 1:
            faMessage = "Created %s csv (Excel) files" % len(file_anns)
    else:
        message = 'file annotation must be txt or csv'
        return message
    # clean up
    delete_downloaded_data(ann)
    message += faMessage
    return message
def batchImageExport(conn, scriptParams):
    """
    Export the chosen images (or all images in the chosen datasets) to disk
    as OME-TIFF or as rendered planes in the requested format, zip the
    results (unless a single OME-TIFF) and attach the file to the first
    selected object.

    @param conn:         BlitzGateway connection
    @param scriptParams: script parameter map
    @return:             (file annotation, message) or None if nothing
                         was chosen to export
    """
    # for params with default values, we can get the value directly
    splitCs = scriptParams["Export_Individual_Channels"]
    mergedCs = scriptParams["Export_Merged_Image"]
    greyscale = scriptParams["Individual_Channels_Grey"]
    dataType = scriptParams["Data_Type"]
    ids = scriptParams["IDs"]
    folder_name = scriptParams["Folder_Name"]
    format = scriptParams["Format"]
    projectZ = "Choose_Z_Section" in scriptParams and scriptParams[
        "Choose_Z_Section"] == 'Max projection'
    if (not splitCs) and (not mergedCs):
        log("Not chosen to save Individual Channels OR Merged Image")
        return
    # check if we have these params
    channelNames = []
    if "Channel_Names" in scriptParams:
        channelNames = scriptParams["Channel_Names"]
    imgWidth = None
    if "Image_Width" in scriptParams:
        imgWidth = scriptParams["Image_Width"]

    # functions used below for each imaage.
    def getZrange(sizeZ, scriptParams):
        # Resolve the Z plane selection params to a 1-based range tuple:
        # None = last-viewed plane, (z,) = single plane, (start, end+1) = span
        zRange = None
        if "Choose_Z_Section" in scriptParams:
            zChoice = scriptParams["Choose_Z_Section"]
            # NB: all Z indices in this script are 1-based
            if zChoice == 'ALL Z planes':
                zRange = (1, sizeZ + 1)
            elif "OR_specify_Z_index" in scriptParams:
                zIndex = scriptParams["OR_specify_Z_index"]
                zIndex = min(zIndex, sizeZ)
                zRange = (zIndex, )
            elif "OR_specify_Z_start_AND..." in scriptParams and \
                    "...specify_Z_end" in scriptParams:
                start = scriptParams["OR_specify_Z_start_AND..."]
                start = min(start, sizeZ)
                end = scriptParams["...specify_Z_end"]
                end = min(end, sizeZ)
                # in case user got zStart and zEnd mixed up
                zStart = min(start, end)
                zEnd = max(start, end)
                if zStart == zEnd:
                    zRange = (zStart, )
                else:
                    zRange = (zStart, zEnd + 1)
        return zRange

    def getTrange(sizeT, scriptParams):
        # Same resolution as getZrange, but for the T (time) dimension
        tRange = None
        if "Choose_T_Section" in scriptParams:
            tChoice = scriptParams["Choose_T_Section"]
            # NB: all T indices in this script are 1-based
            if tChoice == 'ALL T planes':
                tRange = (1, sizeT + 1)
            elif "OR_specify_T_index" in scriptParams:
                tIndex = scriptParams["OR_specify_T_index"]
                tIndex = min(tIndex, sizeT)
                tRange = (tIndex, )
            elif "OR_specify_T_start_AND..." in scriptParams and \
                    "...specify_T_end" in scriptParams:
                start = scriptParams["OR_specify_T_start_AND..."]
                start = min(start, sizeT)
                end = scriptParams["...specify_T_end"]
                end = min(end, sizeT)
                # in case user got zStart and zEnd mixed up
                tStart = min(start, end)
                tEnd = max(start, end)
                if tStart == tEnd:
                    tRange = (tStart, )
                else:
                    tRange = (tStart, tEnd + 1)
        return tRange

    # Get the images or datasets
    message = ""
    objects, logMessage = script_utils.getObjects(conn, scriptParams)
    message += logMessage
    if not objects:
        return None, message
    # Attach figure to the first image
    parent = objects[0]
    if dataType == 'Dataset':
        images = []
        for ds in objects:
            images.extend(list(ds.listChildren()))
        if not images:
            message += "No image found in dataset(s)"
            return None, message
    else:
        images = objects
    log("Processing %s images" % len(images))
    # somewhere to put images
    curr_dir = os.getcwd()
    exp_dir = os.path.join(curr_dir, folder_name)
    try:
        os.mkdir(exp_dir)
    except:
        # directory may already exist; ignore
        pass
    # do the saving to disk
    if format == 'OME-TIFF':
        for img in images:
            log("Exporting image as OME-TIFF: %s" % img.getName())
            saveAsOmeTiff(conn, img, folder_name)
    else:
        for img in images:
            log("\n----------- Saving planes from image: '%s' ------------"
                % img.getName())
            sizeC = img.getSizeC()
            sizeZ = img.getSizeZ()
            sizeT = img.getSizeT()
            zRange = getZrange(sizeZ, scriptParams)
            tRange = getTrange(sizeT, scriptParams)
            # log the effective export settings for this image
            log("Using:")
            if zRange is None:
                log(" Z-index: Last-viewed")
            elif len(zRange) == 1:
                log(" Z-index: %d" % zRange[0])
            else:
                log(" Z-range: %s-%s" % (zRange[0], zRange[1] - 1))
            if projectZ:
                log(" Z-projection: ON")
            if tRange is None:
                log(" T-index: Last-viewed")
            elif len(tRange) == 1:
                log(" T-index: %d" % tRange[0])
            else:
                log(" T-range: %s-%s" % (tRange[0], tRange[1] - 1))
            log(" Format: %s" % format)
            if imgWidth is None:
                log(" Image Width: no resize")
            else:
                log(" Image Width: %s" % imgWidth)
            log(" Greyscale: %s" % greyscale)
            log("Channel Rendering Settings:")
            for ch in img.getChannels():
                log(" %s: %d-%d" % (ch.getLabel(), ch.getWindowStart(),
                                    ch.getWindowEnd()))
            savePlanesForImage(conn, img, sizeC, splitCs, mergedCs,
                               channelNames, zRange, tRange, greyscale,
                               imgWidth, projectZ=projectZ, format=format,
                               folder_name=folder_name)
        # write log for exported images (not needed for ome-tiff)
        logFile = open(os.path.join(exp_dir, 'Batch_Image_Export.txt'), 'w')
        try:
            for s in logStrings:
                logFile.write(s)
                logFile.write("\n")
        finally:
            logFile.close()
    # zip everything up (unless we've only got a single ome-tiff)
    if format == 'OME-TIFF' and len(os.listdir(exp_dir)) == 1:
        export_file = os.path.join(folder_name, os.listdir(exp_dir)[0])
        mimetype = 'image/tiff'
    else:
        export_file = "%s.zip" % folder_name
        compress(export_file, folder_name)
        mimetype = 'application/zip'
    namespace = omero.constants.namespaces.NSCREATED + \
        "/omero/export_scripts/Batch_Image_Export"
    fileAnnotation, annMessage = script_utils.createLinkFileAnnotation(
        conn, export_file, parent, output="Batch export zip", ns=namespace,
        mimetype=mimetype)
    message += annMessage
    return fileAnnotation, message
def process_data(conn,script_params,image,file_type,sizeC,rectangles,coords,rmax):
    """
    Calculates the ripley l function for coordinates in user-defined
    rectangular region of interest

    For every ROI, and every channel/timepoint, the per-point Ripley K/L
    values are interpolated onto a coarse grid (one pixel per ~50 units of
    ROI extent) with a radial basis function, and the resulting planes are
    assembled into a new OMERO image ("cluster map"). The raw per-point
    values are also written to a CSV and attached to the new image.

    @param conn: the BlitzGateWay connection
    @param script_params: dict of user script inputs; 'New_Dataset' and
        'Container_Name' control where the new images are placed
    @param image: the image being processed
    @param file_type: key into the module-level FILE_TYPES table, giving the
        column names for x, y and frame in the localisation data
    @param sizeC: number of channels in the image
    @param rectangles: the regions of interest; each is indexable as
        (x, y, width, height, ..., roi_id) with the ROI id last
    @param coords: the localisation coordinates, one pandas DataFrame per
        channel
    @param rmax: maximum distance scale for Ripley calculation
    @return: (new_images, dataset, link, new_ids) — the created image
        wrappers, the dataset they were linked to (or None), the list of
        dataset-image links (or None if orphaned), and the new image ids
    """
    updateService = conn.getUpdateService()
    parentDataset = image.getParent()
    parentProject = parentDataset.getParent()
    # column names for this localisation file format
    x = FILE_TYPES[file_type]['x_col']
    y = FILE_TYPES[file_type]['y_col']
    frame = FILE_TYPES[file_type]['frame']
    sizeZ = 1
    sizeT = image.getSizeT()
    if sizeT > 1:
        # Multi-timepoint images carry the per-timepoint frame ranges in the
        # image description as 'Start [ ... ]' / 'Stop [ ... ]' lists.
        # NOTE(review): the slice offsets (start+7, stop-3, stop+6) assume an
        # exact description layout — confirm against the producer script.
        desc = image.getDescription()
        if desc:
            start = desc.index('Start')
            stop = desc.index('Stop')
            starts = desc[start+7:stop-3]
            starts = [int(s) for s in starts.split(',')]
            stops = desc[stop+6:len(desc)-1]
            stops = [int(s) for s in stops.split(',')]
    else:
        # single timepoint: one bin covering every frame in the data
        starts = [1]
        stops = [coords[0][frame].max]
    new_images = []
    new_ids = []
    for r,rect in enumerate(rectangles):
        planes = []   # one 2D cluster-map plane per (channel, timepoint)
        ldf = []      # per-point ripley values, one DataFrame column each
        for c in range(sizeC):
            locs_df = coords[c]
            tt = 0  # NOTE(review): assigned but never used below
            for t in range(sizeT):
                conn.keepAlive()  # long-running loop: stop the session timing out
                # restrict localisations to this timepoint's frame window
                coords_in_frames = locs_df[(locs_df[frame]>= starts[t])
                                           & (locs_df[frame]<= stops[t])]
                locs = get_coords_in_roi(coords_in_frames,rect,file_type)
                # bounding box as [xmin, xmax, ymin, ymax]
                box = [rect[0],rect[0]+rect[2],rect[1],rect[1]+rect[3]]
                # output grid resolution: one pixel per 50 units of ROI size
                # NOTE(review): math.ceil returns a float here (Python 2) and
                # is later passed to np.zeros/np.linspace — relies on
                # implicit float->int acceptance; confirm numpy version.
                pixelsX = math.ceil(float(rect[2] / 50.0))
                pixelsY = math.ceil(float(rect[3] / 50.0))
                print 'pixelsX,pixelsY:',pixelsX,pixelsY
                if len(locs.index)>0:
                    # per-point ripley value at scale rmax
                    l = ripleykperpoint(locs.loc[:,[x,y]].values,rmax,box,0)
                    tx = np.linspace(box[0], box[1], pixelsX)
                    ty = np.linspace(box[2], box[3], pixelsY)
                    XI, YI = np.meshgrid(tx, ty)
                    print 'num x vals,num y vals:',locs.loc[:,[x]].values.shape,locs.loc[:,[y]].values.shape
                    # interpolate scattered per-point values onto the grid
                    rbf = Rbf(locs.loc[:,[x]].values, locs.loc[:,[y]].values,
                              l, epsilon=2)
                    ZI = rbf(XI, YI)
                    ZI[ZI<0.0] = 0.0  # clamp interpolation undershoot
                else:
                    # no localisations in this ROI/timepoint: empty plane
                    ZI = np.zeros((pixelsY,pixelsX))
                    l = np.zeros(100)
                print 'ZI shape:',ZI.shape
                planes.append(ZI)
                ldf.append(pd.DataFrame(l))
        # all channels/timepoints side by side, padded where lengths differ
        ripley_df = pd.concat(ldf,join='outer',axis=1)

        def plane_gen():
            # generator consumed by createImageFromNumpySeq
            for p in planes:
                yield p

        imageName = 'Clusters_ROI_ID%s.ome.tif'%rect[-1]
        description = "Cluster map for ROI%s of ImageID%s" % (rect[-1],image.getId())
        newImg = conn.createImageFromNumpySeq(
            plane_gen(), imageName, sizeZ=sizeZ, sizeC=sizeC, sizeT=sizeT,
            description=description)
        new_images.append(newImg)
        new_ids.append(newImg.getId())
        # attach the raw per-point values as CSV to the new image
        file_name = "RLPP_ROI%s.csv" % (r)
        with open(file_name,'w') as f:
            f.write('# ripley per point data for %s channels and %s timepoints for ROI%s: \n'
                    % (sizeC, sizeT, rect[-1]))
            ripley_df.to_csv(f,sep=',',float_format='%8.2f',index=False,encoding='utf-8')
        new_file_ann, faMessage = script_util.createLinkFileAnnotation(
            conn, file_name, newImg, output="wrote ripley per point data",
            mimetype="text/csv", desc=None)
    if script_params['New_Dataset'] and \
       len(script_params['Container_Name'].strip()) > 0:
        # create a new dataset for new images
        datasetName = script_params['Container_Name']
        print "\nMaking Dataset '%s' of Images from ROIs of Image: %s" \
            % (datasetName, image.getId())
        dataset = omero.model.DatasetI()
        dataset.name = rstring(datasetName)
        desc = "Images in this Dataset are from ROIs of parent Image:\n"\
            " Name: %s\n Image ID: %d" % (image.getName(), image.getId())
        dataset.description = rstring(desc)
        dataset = updateService.saveAndReturnObject(dataset)
        parentDataset = dataset
    else:
        # put new images in existing dataset
        dataset = None
        if parentDataset is not None and parentDataset.canLink():
            parentDataset = parentDataset._obj
        else:
            parentDataset = None
            parentProject = None  # don't add Dataset to parent.
    if parentDataset is None:
        link = None
        print "No dataset created or found for new images."\
            " Images will be orphans."
    else:
        # link every new image into the chosen dataset
        link = []
        for cid in new_ids:
            dsLink = omero.model.DatasetImageLinkI()
            dsLink.parent = omero.model.DatasetI(
                parentDataset.id.val, False)
            dsLink.child = omero.model.ImageI(cid, False)
            updateService.saveObject(dsLink)
            link.append(dsLink)
        if parentProject and parentProject.canLink():
            # and put it in the current project
            projectLink = omero.model.ProjectDatasetLinkI()
            projectLink.parent = omero.model.ProjectI(
                parentProject.getId(), False)
            projectLink.child = omero.model.DatasetI(
                dataset.id.val, False)
            updateService.saveAndReturnObject(projectLink)
    return new_images, dataset, link, new_ids
def roiFigure(conn, commandArgs):
    """
    This processes the script parameters, adding defaults if needed.
    Then calls a method to make the figure, and finally uploads and
    attaches this to the primary image.

    @param: session         The OMERO session
    @param: commandArgs     Map of String:Object parameters for the script.
                            Objects are not rtypes, since getValue() was
                            called when the map was processed below.
                            But, list and map objects may contain rtypes (need
                            to call getValue())

    @return: the id of the originalFileLink child. (ID object, not value)
    """
    log("ROI figure created by OMERO on %s" % date.today())
    log("")

    message = ""  # message to be returned to the client
    pixelIds = []
    imageIds = []
    imageLabels = []

    # function for getting image labels.
    def getImageNames(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in commandArgs:
        if commandArgs["Image_Labels"] == "Datasets":
            def getDatasets(name, tagsList, pdList):
                return [dataset for project, dataset in pdList]
            getLabels = getDatasets
        elif commandArgs["Image_Labels"] == "Tags":
            def getTags(name, tagsList, pdList):
                return tagsList
            getLabels = getTags
        else:
            getLabels = getImageNames
    else:
        getLabels = getImageNames

    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, commandArgs)
    message += logMessage
    if not images:
        return None, message

    # Check for rectangular ROIs and filter images list
    images = [image for image in images if image.getROICount("Rect") > 0]
    if not images:
        message += "No rectangle ROI found."
        return None, message

    # Attach figure to the first image
    omeroImage = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        imageIds.append(image.getId())
        pixelIds.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pdMap = figUtil.getDatasetsProjectsFromImages(conn.getQueryService(),
                                                  imageIds)
    tagMap = figUtil.getTagsFromImages(conn.getMetadataService(), imageIds)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        iId = image.getId()
        imageDate = image.getAcquisitionDate()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]
        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s ID: %d" % (name, iId))
        # acquisition date is stored in milliseconds since the epoch
        log(" Date: %s" % date.fromtimestamp(imageDate/1000))
        log(" Tags: %s" % tags)
        log(" Project/Datasets: %s" % pdString)
        imageLabels.append(getLabels(name, tagsList, pdList))

    # use the first image to define dimensions, channel colours etc.
    sizeX = omeroImage.getSizeX()
    sizeY = omeroImage.getSizeY()
    sizeZ = omeroImage.getSizeZ()
    sizeC = omeroImage.getSizeC()

    width = sizeX
    if "Width" in commandArgs:
        w = commandArgs["Width"]
        try:
            width = int(w)
        except:
            log("Invalid width: %s Using default value: %d" % (str(w), sizeX))

    height = sizeY
    if "Height" in commandArgs:
        h = commandArgs["Height"]
        try:
            height = int(h)
        except:
            # NOTE(review): this format string has one %s placeholder but two
            # arguments — it will raise TypeError if an invalid height is
            # given (compare the width branch above). Should be
            # "Invalid height: %s Using default value: %d".
            log("Invalid height: %s Using default value" % (str(h), sizeY))

    log("Image dimensions for all panels (pixels): width: %d height: %d"
        % (width, height))

    # the channels in the combined image,
    if "Merged_Channels" in commandArgs:
        # convert to 0-based
        mergedIndexes = [c-1 for c in commandArgs["Merged_Channels"]]
    else:
        mergedIndexes = range(sizeC)  # show all
    mergedIndexes.reverse()

    # if no colours added, use existing rendering settings.
    mergedColours = {}
    # Actually, nicer to always use existing rendering settings.
    # if "Merged_Colours" in commandArgs:
    #     for i, c in enumerate(commandArgs["Merged_Colours"]):
    #         if c in COLOURS:
    #             mergedColours[i] = COLOURS[c]

    algorithm = ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in commandArgs:
        a = commandArgs["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = ProjectionType.MEANINTENSITY

    stepping = 1
    if "Stepping" in commandArgs:
        s = commandArgs["Stepping"]
        # only accept a stepping that is within the Z-stack
        if (0 < s < sizeZ):
            stepping = s

    scalebar = None
    if "Scalebar" in commandArgs:
        sb = commandArgs["Scalebar"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None

    overlayColour = (255, 255, 255)
    if "Scalebar_Colour" in commandArgs:
        if commandArgs["Scalebar_Colour"] in OVERLAY_COLOURS:
            r, g, b, a = OVERLAY_COLOURS[commandArgs["Scalebar_Colour"]]
            overlayColour = (r, g, b)

    roiZoom = None
    if "Roi_Zoom" in commandArgs:
        roiZoom = float(commandArgs["Roi_Zoom"])
        if roiZoom == 0:
            roiZoom = None

    maxColumns = None
    if "Max_Columns" in commandArgs:
        maxColumns = commandArgs["Max_Columns"]

    showRoiDuration = False
    if "Show_ROI_Duration" in commandArgs:
        showRoiDuration = commandArgs["Show_ROI_Duration"]

    roiLabel = "FigureROI"
    if "Roi_Selection_Label" in commandArgs:
        roiLabel = commandArgs["Roi_Selection_Label"]

    # gap between panels scales with panel width
    spacer = (width/50) + 2

    print "showRoiDuration", showRoiDuration
    fig = getSplitView(
        conn, imageIds, pixelIds, mergedIndexes, mergedColours, width, height,
        imageLabels, spacer, algorithm, stepping, scalebar, overlayColour,
        roiZoom, maxColumns, showRoiDuration, roiLabel)
    # fig.show()        # bug-fixing only
    if fig is None:
        logMessage = "No figure produced"
        log("\n"+logMessage)
        message += logMessage
        return None, message
    figLegend = "\n".join(logStrings)

    # print figLegend    # bug fixing only
    format = commandArgs["Format"]

    figureName = "movieROIFigure"
    if "Figure_Name" in commandArgs:
        figureName = commandArgs["Figure_Name"]
        figureName = os.path.basename(figureName)
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figureName = figureName + ".png"
        fig.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figureName = figureName + ".tiff"
        fig.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figureName = figureName + ".jpg"
        fig.save(output)
        mimetype = "image/jpeg"

    # Use util method to upload the figure 'output' to the server, attaching
    # it to the omeroImage, adding the
    # figLegend as the fileAnnotation description.
    # Returns the id of the originalFileLink child. (ID object, not value)
    namespace = NSCREATED + "/omero/figure_scripts/Movie_ROI_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, omeroImage, output="Movie ROI figure",
        mimetype=mimetype, ns=namespace, desc=figLegend,
        origFilePathAndName=figureName)

    message += faMessage

    return fileAnnotation, message
def run_processing(conn,script_params):
    """
    Collects params and starts the processing

    For each selected image, runs the pair-correlation analysis
    (process_data), writes the radius/correlation columns plus any fitted
    model columns to a CSV, and attaches the CSV to the image. Optionally
    emails the results.

    NOTE(review): this function appears to be duplicated later in the file
    in autopep8-formatted form — the two copies should be consolidated.

    @param conn: the BlitzGateWay connection
    @param script_params: the parameters collected from the script input
    @return: status message for the client
    """
    file_anns = []
    message = ""
    rmax = script_params['Max_radius']
    # exponential-model fit settings and initial parameters
    expo_fit = script_params['Fit_exponential_model']
    expo_params = [script_params['Exponential_baseline'],\
                   script_params['Exponential_amplitude'],\
                   script_params['Exponential_decay']]
    # exponential+gaussian-model fit settings and initial parameters
    expogauss_fit = script_params['Fit_exponential+gaussian_model']
    expogauss_params = [script_params['Density'],script_params['PSF'],\
                        script_params['Amplitude'],script_params['Decay'],\
                        script_params['Baseline']]
    image_ids = script_params['IDs']
    for image in conn.getObjects("Image",image_ids):
        if not image:
            message = 'Could not find specified image'
            return message

        image_id = image.getId()
        sizeC = image.getSizeC()
        corr_func = script_params['Pair_correlation']
        # cross-correlation needs exactly two channels to correlate
        if ('cross' in corr_func) and (sizeC != 2):
            return 'image should have two channels to cross-correlate'

        # NOTE(review): this overwrites any message accumulated so far with
        # the one returned by process_data — confirm intended.
        message,output = process_data(conn,image,corr_func,rmax,expo_fit,expo_params,\
                                      expogauss_fit,expogauss_params)
        if output:
            # one CSV per image; one section per ROI
            file_name = "image%s_%s_correlation.csv" % (image_id,corr_func)
            with file(file_name, 'w') as outfile:
                outfile.write('# auto correlation data for %s ROIs and %s channels: \n' %\
                              (len(output), output[0]['correlation'].shape[1] ))
                for r, pair_corr in enumerate(output):
                    header = 'radius,'
                    data = np.concatenate((pair_corr['radius'],pair_corr['correlation']),axis=1)
                    # one 'correlation' column per channel
                    for i in range(pair_corr['correlation'].shape[1]):
                        header += 'correlation,'
                    outfile.write('# Region of interest %s\n' % r)
                    if pair_corr['exponential_fit'] is not None:
                        outfile.write('exponential fit params for ROI %s: \n' % r)
                        outfile.write('Baseline: %s \n' % pair_corr['exponential_params'][:,0])
                        outfile.write('Amplitude: %s \n' % pair_corr['exponential_params'][:,1])
                        outfile.write('Decay: %s \n' % pair_corr['exponential_params'][:,2])
                        for i in range(pair_corr['exponential_fit'].shape[1]):
                            header += 'fit,'
                        data = np.concatenate((data,pair_corr['exponential_fit']),axis=1)
                    if pair_corr['exponential+gaussian_fit'] is not None:
                        # NOTE(review): 'exponential+gaussiam_params' looks
                        # like a typo for '...gaussian_params', but the key
                        # must match whatever process_data produces — verify
                        # before renaming.
                        outfile.write('exponential+gaussian fit params for ROI %s: \n' % r)
                        outfile.write('Density: %s \n' % pair_corr['exponential+gaussiam_params'][:,0])
                        outfile.write('PSF: %s \n' % pair_corr['exponential+gaussiam_params'][:,1])
                        outfile.write('Amplitude: %s \n' % pair_corr['exponential+gaussiam_params'][:,2])
                        outfile.write('Decay: %s \n' % pair_corr['exponential+gaussiam_params'][:,3])
                        outfile.write('Baseline: %s \n' % pair_corr['exponential+gaussiam_params'][:,4])
                        for i in range(pair_corr['exponential+gaussian_fit'].shape[1]):
                            header += 'fit,'
                        data = np.concatenate((data,pair_corr['exponential+gaussian_fit']),axis=1)
                    # drop the trailing comma from the assembled header
                    outfile.write(header[:-1] + '\n')
                    np.savetxt(outfile, data, fmt='%-7.2f', delimiter=',', newline='\n')

            # attach the CSV to the source image
            new_file_ann, faMessage = script_util.createLinkFileAnnotation(
                conn, file_name, image, output="Pair correlation csv file",
                mimetype="text/csv", desc=None)
            if new_file_ann:
                file_anns.append(new_file_ann)

    if not file_anns:
        faMessage = "No Analysis files created. See 'Info' or 'Error' for"\
                    " more details"
    elif len(file_anns) > 1:
        faMessage = "Created %s csv (Excel) files" % len(file_anns)
    message += faMessage

    if script_params['Email_Results'] and file_anns:
        email_results(conn,script_params,image_ids,file_anns)

    return message
def movieFigure(conn, commandArgs):
    """
    Makes the figure using the parameters in @commandArgs, attaches the
    figure to the parent Project/Dataset, and returns the file-annotation ID

    @param session      The OMERO session
    @param commandArgs  Map of parameters for the script
    @return             Returns the id of the originalFileLink child.
                        (ID object, not value)
    """
    log("Movie figure created by OMERO on %s" % date.today())
    log("")

    # UI time-unit choice -> label printed on the figure
    timeLabels = {"SECS_MILLIS": "seconds",
                  "SECS": "seconds",
                  "MINS": "minutes",
                  "HOURS": "hours",
                  "MINS_SECS": "mins:secs",
                  "HOURS_MINS": "hours:mins"}
    timeUnits = "SECS"
    if "Time_Units" in commandArgs:
        timeUnits = commandArgs["Time_Units"]
        # convert from UI name to timeLabels key
        timeUnits = timeUnits.replace(" ", "_")
    if timeUnits not in timeLabels.keys():
        timeUnits = "SECS"
    log("Time units are in %s" % timeLabels[timeUnits])

    pixelIds = []
    imageIds = []
    imageLabels = []
    message = ""  # message to be returned to the client

    # function for getting image labels.
    def getImageNames(fullName, tagsList, pdList):
        name = fullName.split("/")[-1]
        return [name]

    # default function for getting labels is getName (or use datasets / tags)
    if "Image_Labels" in commandArgs:
        if commandArgs["Image_Labels"] == "Datasets":
            def getDatasets(name, tagsList, pdList):
                return [dataset for project, dataset in pdList]
            getLabels = getDatasets
        elif commandArgs["Image_Labels"] == "Tags":
            def getTags(name, tagsList, pdList):
                return tagsList
            getLabels = getTags
        else:
            getLabels = getImageNames
    else:
        getLabels = getImageNames

    # Get the images
    images, logMessage = scriptUtil.getObjects(conn, commandArgs)
    message += logMessage
    if not images:
        return None, message

    # Attach figure to the first image
    omeroImage = images[0]

    # process the list of images
    log("Image details:")
    for image in images:
        imageIds.append(image.getId())
        pixelIds.append(image.getPrimaryPixels().getId())

    # a map of imageId : list of (project, dataset) names.
    pdMap = figUtil.getDatasetsProjectsFromImages(
        conn.getQueryService(), imageIds)
    tagMap = figUtil.getTagsFromImages(conn.getMetadataService(), imageIds)
    # Build a legend entry for each image
    for image in images:
        name = image.getName()
        iId = image.getId()
        imageDate = image.getAcquisitionDate()
        tagsList = tagMap[iId]
        pdList = pdMap[iId]
        tags = ", ".join(tagsList)
        pdString = ", ".join(["%s/%s" % pd for pd in pdList])
        log(" Image: %s ID: %d" % (name, iId))
        if imageDate:
            log(" Date: %s" % imageDate)
        else:
            log(" Date: not set")
        log(" Tags: %s" % tags)
        log(" Project/Datasets: %s" % pdString)
        imageLabels.append(getLabels(name, tagsList, pdList))

    # use the first image to define dimensions, channel colours etc.
    sizeX = omeroImage.getSizeX()
    sizeY = omeroImage.getSizeY()
    sizeZ = omeroImage.getSizeZ()
    sizeT = omeroImage.getSizeT()

    tIndexes = []
    if "T_Indexes" in commandArgs:
        for t in commandArgs["T_Indexes"]:
            tIndexes.append(t)
    print "T_Indexes", tIndexes
    if len(tIndexes) == 0:  # if no t-indexes given, use all t-indices
        tIndexes = range(sizeT)

    # -1 means "not set" for the Z-projection range
    zStart = -1
    zEnd = -1
    if "Z_Start" in commandArgs:
        zStart = commandArgs["Z_Start"]
    if "Z_End" in commandArgs:
        zEnd = commandArgs["Z_End"]

    width = sizeX
    if "Width" in commandArgs:
        width = commandArgs["Width"]

    height = sizeY
    if "Height" in commandArgs:
        height = commandArgs["Height"]

    # gap between panels scales with panel width
    spacer = (width/25) + 2

    algorithm = ProjectionType.MAXIMUMINTENSITY
    if "Algorithm" in commandArgs:
        a = commandArgs["Algorithm"]
        if (a == "Mean Intensity"):
            algorithm = ProjectionType.MEANINTENSITY

    stepping = 1
    if "Stepping" in commandArgs:
        s = commandArgs["Stepping"]
        # only accept a stepping that is within the Z-stack
        if (0 < s < sizeZ):
            stepping = s

    scalebar = None
    if "Scalebar" in commandArgs:
        sb = commandArgs["Scalebar"]
        try:
            scalebar = int(sb)
            if scalebar <= 0:
                scalebar = None
            else:
                log("Scalebar is %d microns" % scalebar)
        except:
            log("Invalid value for scalebar: %s" % str(sb))
            scalebar = None

    overlayColour = (255, 255, 255)
    if "Scalebar_Colour" in commandArgs:
        # NOTE(review): unlike roiFigure, there is no membership check here —
        # an unknown colour name would raise KeyError; confirm the UI
        # restricts the choices.
        r, g, b, a = OVERLAY_COLOURS[commandArgs["Scalebar_Colour"]]
        overlayColour = (r, g, b)

    maxColCount = 10
    if "Max_Columns" in commandArgs:
        maxColCount = commandArgs["Max_Columns"]

    figure = createMovieFigure(
        conn, pixelIds, tIndexes, zStart, zEnd, width, height, spacer,
        algorithm, stepping, scalebar, overlayColour, timeUnits, imageLabels,
        maxColCount)

    log("")
    figLegend = "\n".join(logLines)

    # print figLegend    # bug fixing only
    format = commandArgs["Format"]

    figureName = "movieFigure"
    if "Figure_Name" in commandArgs:
        figureName = str(commandArgs["Figure_Name"])
        figureName = os.path.basename(figureName)
    output = "localfile"
    if format == 'PNG':
        output = output + ".png"
        figureName = figureName + ".png"
        figure.save(output, "PNG")
        mimetype = "image/png"
    elif format == 'TIFF':
        output = output + ".tiff"
        figureName = figureName + ".tiff"
        figure.save(output, "TIFF")
        mimetype = "image/tiff"
    else:
        output = output + ".jpg"
        figureName = figureName + ".jpg"
        figure.save(output)
        mimetype = "image/jpeg"

    # upload the figure, attach it to the first image, use the legend as the
    # annotation description
    namespace = NSCREATED + "/omero/figure_scripts/Movie_Figure"
    fileAnnotation, faMessage = scriptUtil.createLinkFileAnnotation(
        conn, output, omeroImage, output="Movie figure", mimetype=mimetype,
        ns=namespace, desc=figLegend, origFilePathAndName=figureName)

    message += faMessage

    return fileAnnotation, message
def run_processing(conn, script_params):
    """
    Runs the localisation-density analysis for one image.

    Downloads the localisation file attached as the given annotation, parses
    the coordinates, measures local density within `radius` for each
    rectangular ROI on the image, writes the results to a CSV and attaches
    it to the image.

    @param conn: the BlitzGateWay connection
    @param script_params: dict of user inputs ('ImageID', 'AnnotationID',
        'Radius', 'File_Type', 'Convert_coordinates_to_nm',
        'Parent_Image_Pixel_Size')
    @return: status message for the client
    """
    file_anns = []
    message = ""
    imageIds = []
    image_id = script_params['ImageID']
    imageIds.append(image_id)
    image = conn.getObject("Image", image_id)
    if not image:
        message = 'Could not find specified image'
        return message

    file_id = script_params['AnnotationID']
    ann = conn.getObject("Annotation", file_id)
    if not ann:
        message = 'Could not find specified annotation'
        return message

    radius = script_params['Radius']

    # other parameters
    # scale factor applied when parsing coordinates (1 = leave as-is)
    if script_params['Convert_coordinates_to_nm']:
        cam_pix_size = script_params['Parent_Image_Pixel_Size']
    else:
        cam_pix_size = 1
    file_type = FILE_TYPES[script_params['File_Type']]

    path_to_ann = ann.getFile().getPath() + '/' + ann.getFile().getName()
    # only the extension is used; 'name' is unused
    name, ext = os.path.splitext(path_to_ann)
    if ('txt' in ext) or ('csv' in ext):
        # get the path to the downloaded data
        path_to_data = download_data(ann)

        # get all the xy coords in that data
        locs = parse_sr_data(path_to_data, file_type, cam_pix_size)
        sizeC = len(locs)  # one coordinate table per channel

        # get the rois to be processed
        rectangles = get_rectangles(conn, image_id)
        print 'rectanges:', rectangles

        # calculate local density
        locs_density = process_data(conn, image, file_type, sizeC,
                                    rectangles, locs, radius)

        # write the data to a csv
        file_name = "localisation_density_" + ann.getFile().getName(
        )[:-4] + '.csv'
        # NOTE(review): opened in append mode ('a') — rerunning the script
        # will accumulate data onto an existing file; 'w' may be intended.
        with file(file_name, 'a') as outfile:
            outfile.write(
                '# localisation density data for %s channels and %s ROIs: \n'
                % (sizeC, len(locs_density)))
            for r in range(len(locs_density)):
                # last element of each rectangle is the ROI id
                outfile.write('# ROI %s\n' % rectangles[r][-1])
                outfile.write(
                    'Channel,%s,%s,Density within %s [nm]\n'
                    % (file_type['x_col'], file_type['y_col'], str(radius)))
                # first row is skipped — assumed header/placeholder row;
                # confirm against process_data's output layout
                density = locs_density[r][1:, :]
                np.savetxt(outfile, density, fmt='%-7.2f', delimiter=',',
                           newline='\n')

        # attach the CSV to the image
        new_file_ann, faMessage = script_util.createLinkFileAnnotation(
            conn, file_name, image,
            output="Wrote localisation density csv (Excel) file",
            mimetype="text/csv", desc=None)
        if new_file_ann:
            file_anns.append(new_file_ann)

        if not file_anns:
            faMessage = "No Analysis files created. See 'Info' or 'Error' for"\
                        " more details"
        elif len(file_anns) > 1:
            faMessage = "Created %s csv (Excel) files" % len(file_anns)
    else:
        message = 'file annotation must be txt or csv'
        return message

    # clean up
    delete_downloaded_data(ann)

    message += faMessage
    return message
def run_processing(conn, script_params):
    """
    Collects params and starts the processing

    For each selected image, runs the pair-correlation analysis
    (process_data), writes the radius/correlation columns plus any fitted
    model columns to a CSV, and attaches the CSV to the image. Optionally
    emails the results.

    NOTE(review): this is an autopep8-formatted duplicate of an earlier
    run_processing in this file — the two copies should be consolidated.

    @param conn: the BlitzGateWay connection
    @param script_params: the parameters collected from the script input
    @return: status message for the client
    """
    file_anns = []
    message = ""
    rmax = script_params['Max_radius']
    # exponential-model fit settings and initial parameters
    expo_fit = script_params['Fit_exponential_model']
    expo_params = [script_params['Exponential_baseline'],\
                   script_params['Exponential_amplitude'],\
                   script_params['Exponential_decay']]
    # exponential+gaussian-model fit settings and initial parameters
    expogauss_fit = script_params['Fit_exponential+gaussian_model']
    expogauss_params = [script_params['Density'],script_params['PSF'],\
                        script_params['Amplitude'],script_params['Decay'],\
                        script_params['Baseline']]
    image_ids = script_params['IDs']
    for image in conn.getObjects("Image", image_ids):
        if not image:
            message = 'Could not find specified image'
            return message

        image_id = image.getId()
        sizeC = image.getSizeC()
        corr_func = script_params['Pair_correlation']
        # cross-correlation needs exactly two channels to correlate
        if ('cross' in corr_func) and (sizeC != 2):
            return 'image should have two channels to cross-correlate'

        # NOTE(review): this overwrites any message accumulated so far with
        # the one returned by process_data — confirm intended.
        message,output = process_data(conn,image,corr_func,rmax,expo_fit,expo_params,\
                                      expogauss_fit,expogauss_params)
        if output:
            # one CSV per image; one section per ROI
            file_name = "image%s_%s_correlation.csv" % (image_id, corr_func)
            with file(file_name, 'w') as outfile:
                outfile.write('# auto correlation data for %s ROIs and %s channels: \n' %\
                              (len(output), output[0]['correlation'].shape[1] ))
                for r, pair_corr in enumerate(output):
                    header = 'radius,'
                    data = np.concatenate(
                        (pair_corr['radius'], pair_corr['correlation']),
                        axis=1)
                    # one 'correlation' column per channel
                    for i in range(pair_corr['correlation'].shape[1]):
                        header += 'correlation,'
                    outfile.write('# Region of interest %s\n' % r)
                    if pair_corr['exponential_fit'] is not None:
                        outfile.write('exponential fit params for ROI %s: \n'
                                      % r)
                        outfile.write('Baseline: %s \n'
                                      % pair_corr['exponential_params'][:, 0])
                        outfile.write('Amplitude: %s \n'
                                      % pair_corr['exponential_params'][:, 1])
                        outfile.write('Decay: %s \n'
                                      % pair_corr['exponential_params'][:, 2])
                        for i in range(pair_corr['exponential_fit'].shape[1]):
                            header += 'fit,'
                        data = np.concatenate(
                            (data, pair_corr['exponential_fit']), axis=1)
                    if pair_corr['exponential+gaussian_fit'] is not None:
                        # NOTE(review): 'exponential+gaussiam_params' looks
                        # like a typo for '...gaussian_params', but the key
                        # must match whatever process_data produces — verify
                        # before renaming.
                        outfile.write(
                            'exponential+gaussian fit params for ROI %s: \n'
                            % r)
                        outfile.write(
                            'Density: %s \n'
                            % pair_corr['exponential+gaussiam_params'][:, 0])
                        outfile.write(
                            'PSF: %s \n'
                            % pair_corr['exponential+gaussiam_params'][:, 1])
                        outfile.write(
                            'Amplitude: %s \n'
                            % pair_corr['exponential+gaussiam_params'][:, 2])
                        outfile.write(
                            'Decay: %s \n'
                            % pair_corr['exponential+gaussiam_params'][:, 3])
                        outfile.write(
                            'Baseline: %s \n'
                            % pair_corr['exponential+gaussiam_params'][:, 4])
                        for i in range(pair_corr['exponential+gaussian_fit'].
                                       shape[1]):
                            header += 'fit,'
                        data = np.concatenate(
                            (data, pair_corr['exponential+gaussian_fit']),
                            axis=1)
                    # drop the trailing comma from the assembled header
                    outfile.write(header[:-1] + '\n')
                    np.savetxt(outfile, data, fmt='%-7.2f', delimiter=',',
                               newline='\n')

            # attach the CSV to the source image
            new_file_ann, faMessage = script_util.createLinkFileAnnotation(
                conn, file_name, image, output="Pair correlation csv file",
                mimetype="text/csv", desc=None)
            if new_file_ann:
                file_anns.append(new_file_ann)

    if not file_anns:
        faMessage = "No Analysis files created. See 'Info' or 'Error' for"\
                    " more details"
    elif len(file_anns) > 1:
        faMessage = "Created %s csv (Excel) files" % len(file_anns)
    message += faMessage

    if script_params['Email_Results'] and file_anns:
        email_results(conn, script_params, image_ids, file_anns)

    return message
def run_processing(conn,script_params):
    """
    Runs the Ripley L analysis for one image.

    Downloads the localisation file attached as the given annotation, parses
    the coordinates, computes the Ripley L curve for each rectangular ROI
    (via process_data), writes the curves to a CSV and attaches it to the
    image.

    @param conn: the BlitzGateWay connection
    @param script_params: dict of user inputs ('ImageID', 'AnnotationID',
        'SR_pixel_size', 'Convert_coordinates_to_nm',
        'Parent_Image_Pixel_Size', 'File_Type', 'Max_radius')
    @return: status message for the client
    """
    file_anns = []
    message = ""
    image_id = script_params['ImageID']
    image = conn.getObject("Image",image_id)
    if not image:
        message = 'Could not find specified image'
        return message

    file_id = script_params['AnnotationID']
    ann = conn.getObject("Annotation",file_id)
    if not ann:
        message = 'Could not find specified annotation'
        return message

    #other parameters
    sr_pix_size = script_params['SR_pixel_size']
    # scale factor applied when parsing coordinates (1 = leave as-is)
    if script_params['Convert_coordinates_to_nm']:
        cam_pix_size = script_params['Parent_Image_Pixel_Size']
    else:
        cam_pix_size = 1
    file_type = script_params['File_Type']
    rmax = script_params['Max_radius']

    path_to_ann = ann.getFile().getPath() + '/' + ann.getFile().getName()
    # only the extension is used; 'name' is unused
    name,ext = os.path.splitext(path_to_ann)
    if ('txt' in ext) or ('csv' in ext):
        path_to_data = download_data(ann)
        coords = parse_sr_data(path_to_data,file_type,cam_pix_size)
        rectangles = get_rectangles(conn,image_id,sr_pix_size)
        roi_data = process_data(conn,image,rectangles,coords,rmax)
        file_name = "ripleyl_plot_" + ann.getFile().getName()[:-4] + '.csv'
        print file_name
        try:
            f = open(file_name,'w')
            # one row per radius sample, one block per channel
            for chan in range(len(roi_data)):
                # NOTE(review): roi_data.shape[0] but roi_data is indexed as
                # roi_data[chan][r,:] below — if roi_data is a list of
                # per-channel arrays this raises AttributeError; probably
                # roi_data[chan].shape[0] was intended. Confirm against
                # process_data's return type.
                for r in range(roi_data.shape[0]):
                    row = roi_data[chan][r,:]
                    f.write(','.join([str(c) for c in row])+'\n')
        finally:
            f.close()

        new_file_ann, faMessage = script_util.createLinkFileAnnotation(
            conn, file_name, image, output="Ripley L Plot csv (Excel) file",
            mimetype="text/csv", desc=None)
        if new_file_ann:
            file_anns.append(new_file_ann)

        if not file_anns:
            faMessage = "No Analysis files created. See 'Info' or 'Error' for"\
                        " more details"
        elif len(file_anns) > 1:
            faMessage = "Created %s csv (Excel) files" % len(file_anns)
        # NOTE(review): faMessage is appended here AND again after cleanup
        # below, so it appears twice in the returned message; the sibling
        # run_processing variants append it only once.
        message += faMessage
    else:
        message = 'file annotation must be txt or csv'
        return message

    # clean up
    delete_downloaded_data(ann)

    message += faMessage
    return message