def showAsComposite(images, title="Composite", show=True):
    imps = []
    # Collect all images as ImagePlus, checking that they have the same XY dimensions.
    # (Z doesn't matter)
    dimensions = None
    for img in images:
        if isinstance(img, ImagePlus):
            imps.append(img)
        else:
            imps.append(IL.wrap(img, ""))
        if not dimensions:
            dimensions = [imps[-1].getWidth(), imps[-1].getHeight()]
        else:
            if imps[-1].width != dimensions[0] or imps[-1].getHeight() != dimensions[1]:
                print "showAsComposite: dimensions mismatch."
                return
    imp = ImagePlus(title, StacksAsChannels([imp.getStack() for imp in imps]))
    imp.setDimensions(len(imps), max(imp.getStack().getSize() for imp in imps), 1)
    comp = CompositeImage(imp, CompositeImage.COMPOSITE)
    if show:
        comp.show()
    print imp.getNChannels(), imp.getNSlices(), imp.getNFrames(), "but imps: ", len(imps)
    return comp
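# Usage sketch (not part of the original script; assumes the StacksAsChannels helper and
# the ImagePlus/IL imports above are in scope, and that at least two images of equal XY
# size are currently open):
from ij import WindowManager

ids = WindowManager.getIDList()
if ids and len(ids) > 1:
    open_imps = [WindowManager.getImage(img_id) for img_id in ids]
    comp = showAsComposite(open_imps, title="All open images as channels")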
def withVirtualStack(time_window=None, subsample=None):
    with open(os.path.join(baseDir, csvFilename), 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
        header = reader.next()
        peaks = [RealPoint.wrap(imap(float, peak.split('::')))
                 for peak in islice(header, 1, None)]
        frames = [virtualPointsRAI(peaks, radius, interval,
                                   inside=to8bitRange(map(float, islice(row, 1, None))))
                  for row in reader]
        if time_window:
            first, last = time_window
            frames = frames[first:last + 1]
        img4D = Views.stack(frames)
        # Scale by a factor of 'subsample' in every dimension by nearest neighbor, sort of:
        if subsample:
            img4D = Views.subsample(img4D, subsample)
        imp = ImagePlus("deltaF/F", ImageJVirtualStackUnsignedByte.wrap(img4D))
        imp.setDimensions(1, img4D.dimension(2), img4D.dimension(3))
        imp.setDisplayRange(0, 255)
        com = CompositeImage(imp, CompositeImage.GRAYSCALE)
        com.show()
        univ = Image3DUniverse(512, 512)
        univ.show()
        univ.addVoltex(com)
def main():
    img = IJF.wrap(imp)
    img_out = smooth_temporal_gradient(ops, img, sigma_xy, sigma_t,
                                       frame_start, frame_end, normalize_output)
    img_out.setCalibration(imp.getCalibration().copy())
    comp = CompositeImage(img_out, CompositeImage.COMPOSITE)
    comp.show()
def showStack(img, title="", proper=True, n_channels=1):
    # IL.wrap fails: shows slices as channels, and channels as frames
    if not proper:
        imp = IL.wrap(img, title)
        imp.show()
        return imp
    # Proper sorting of slices, channels and frames
    imp = wrap(img, title=title, n_channels=n_channels)
    comp = CompositeImage(imp, CompositeImage.GRAYSCALE if 1 == n_channels
                          else CompositeImage.COLOR)
    comp.show()
    return comp
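# Usage sketch (hypothetical; 'wrap' above is a custom helper defined elsewhere in the
# same script, so this only illustrates the intended call pattern on an ImgLib2 image):
from ij import IJ
from net.imglib2.img.display.imagej import ImageJFunctions as IL

img = IL.wrap(IJ.getImage())  # view the active ImagePlus as an ImgLib2 image
comp = showStack(img, title="hyperstack view", n_channels=1)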
def extractFrame(imp, nFrame):
    """Extract a frame from the image, returning a new 16-bit ImagePlus labelled with the channel name."""
    stack = imp.getImageStack()
    fr = ImageStack(imp.width, imp.height)
    for i in range(1, imp.getNSlices() + 1):
        for nChannel in range(1, imp.getNChannels() + 1):
            index = imp.getStackIndex(nChannel, i, nFrame)
            fr.addSlice(str(i), stack.getProcessor(index))
    imp3 = ImagePlus("Frame " + str(nFrame), fr).duplicate()
    imp3.setDimensions(imp.getNChannels(), imp.getNSlices(), 1)
    comp = CompositeImage(imp3, CompositeImage.COMPOSITE)
    comp.show()
    return comp
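# Usage sketch (an addition, not from the original script): extract time frame 3 of the
# currently active hyperstack as its own composite image.
from ij import IJ

imp = IJ.getImage()
if imp.getNFrames() >= 3:
    frame3 = extractFrame(imp, 3)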
def load_image(self, title):
    '''Loads an image according to its title. Returns an ImagePlus.'''
    imgpath = os.path.join(self.imgfolder, title)
    imp = imageloader(imgpath, debug=self.debug)
    composite_imp = CompositeImage(imp, 1)
    return composite_imp
imp = IJ.openImage("https://imagej.nih.gov/ij/images/flybrain.zip")
stack = imp.getImageStack()

# A new stack to hold the hyperstack data
stack2 = ImageStack(imp.width, imp.height)

# Convert each color slice of the stack into two 32-bit FloatProcessor slices
for i in xrange(1, imp.getNSlices() + 1):
    # Extract the ColorProcessor at index i
    cp = stack.getProcessor(i)
    # Extract the red and green channels as FloatProcessor
    red = cp.toFloat(0, None)
    green = cp.toFloat(1, None)
    # Add red and green to stack2
    stack2.addSlice(None, red)
    stack2.addSlice(None, green)

# Create an ImagePlus from stack2
imp2 = ImagePlus("32-bit 2-channel composite", stack2)

# Copy and apply the spatial and density calibration data from imp
# https://imagej.nih.gov/ij/developer/api/ij/measure/Calibration.html
imp2.setCalibration(imp.getCalibration().copy())

# Present the slices of stack2 in hyperstack form and open as a CompositeImage
nChannels = 2              # two color channels
nSlices = stack.getSize()  # the number of slices of the original image
nFrames = 1                # only one time point
imp2.setDimensions(nChannels, nSlices, nFrames)
comp = CompositeImage(imp2, CompositeImage.COLOR)
comp.show()
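# Optional follow-up sketch (an addition, assuming 'comp' from above): give each of the
# two channels its own LUT so the composite renders red and green overlaid.
from ij.process import LUT
from java.awt import Color

comp.setChannelLut(LUT.createLutFromColor(Color.RED), 1)
comp.setChannelLut(LUT.createLutFromColor(Color.GREEN), 2)
comp.updateAndDraw()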
def create_registered_hyperstack(imp, target_folder, channel):
    """ Takes the imp, which contains a virtual hyper stack, and determines the x,y,z drift
    for each pair of time points, using the preferred given channel,
    and outputs one image for each slice into the target folder."""
    shifts = compute_frame_translations(imp, channel)
    # Make shifts relative to 0,0,0 of the original imp:
    shifts = concatenate_shifts(shifts)
    print "shifts concatenated:"
    for s in shifts:
        print s.x, s.y, s.z
    # Compute bounds of the new volume,
    # which accounts for all translations:
    minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
    # Make shifts relative to new canvas dimensions
    # so that the min values become 0,0,0
    for shift in shifts:
        shift.x -= minx
        shift.y -= miny
        shift.z -= minz
    print "shifts relative to new dimensions:"
    for s in shifts:
        print s.x, s.y, s.z
    # new canvas dimensions:
    width = imp.width + maxx - minx
    height = maxy - miny + imp.height
    slices = maxz - minz + imp.getNSlices()
    print "New dimensions:", width, height, slices
    # Count number of digits of each dimension, to output zero-padded numbers:
    slice_digits = len(str(slices))
    frame_digits = len(str(imp.getNFrames()))
    channel_digits = len(str(imp.getNChannels()))
    # List to accumulate all created names:
    names = []
    # Prepare empty slice to pad in Z when necessary
    empty = imp.getProcessor().createProcessor(width, height)
    # if it's RGB, fill the empty slice with blackness
    if isinstance(empty, ColorProcessor):
        empty.setValue(0)
        empty.fill()
    # Write all slices to files:
    stack = imp.getStack()
    for frame in range(1, imp.getNFrames() + 1):
        shift = shifts[frame - 1]
        fr = "t" + zero_pad(frame, frame_digits)
        # Pad with empty slices before reaching the first slice
        for s in range(shift.z):
            ss = "_z" + zero_pad(s + 1, slice_digits)  # slices start at 1
            for ch in range(1, imp.getNChannels() + 1):
                name = fr + ss + "_c" + zero_pad(ch, channel_digits) + ".tif"
                names.append(name)
                FileSaver(ImagePlus("", empty)).saveAsTiff(target_folder + "/" + name)
        # Add all proper slices
        for s in range(1, imp.getNSlices() + 1):
            ss = "_z" + zero_pad(s + shift.z, slice_digits)
            for ch in range(1, imp.getNChannels() + 1):
                ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
                ip2 = ip.createProcessor(width, height)  # potentially larger
                ip2.insert(ip, shift.x, shift.y)
                name = fr + ss + "_c" + zero_pad(ch, channel_digits) + ".tif"
                names.append(name)
                FileSaver(ImagePlus("", ip2)).saveAsTiff(target_folder + "/" + name)
        # Pad the end
        for s in range(shift.z + imp.getNSlices(), slices):
            ss = "_z" + zero_pad(s + 1, slice_digits)
            for ch in range(1, imp.getNChannels() + 1):
                name = fr + ss + "_c" + zero_pad(ch, channel_digits) + ".tif"
                names.append(name)
                FileSaver(ImagePlus("", empty)).saveAsTiff(target_folder + "/" + name)
    # Create virtual hyper stack with the result
    vs = VirtualStack(width, height, None, target_folder)
    for name in names:
        vs.addSlice(name)
    vs_imp = ImagePlus("registered time points", vs)
    vs_imp.setDimensions(imp.getNChannels(),
                         len(names) / (imp.getNChannels() * imp.getNFrames()),
                         imp.getNFrames())
    vs_imp.setOpenAsHyperStack(True)
    IJ.log("\nHyperstack dimensions: time frames:" + str(vs_imp.getNFrames()) +
           ", slices: " + str(vs_imp.getNSlices()) +
           ", channels: " + str(vs_imp.getNChannels()))
    if 1 == vs_imp.getNSlices():
        return vs_imp
    # Else, as composite
    mode = CompositeImage.COLOR
    if isinstance(imp, CompositeImage):
        mode = imp.getMode()
    else:
        return vs_imp
    return CompositeImage(vs_imp, mode)
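# Usage sketch (hypothetical output folder and channel choice; relies on the helper
# functions referenced above, e.g. compute_frame_translations and zero_pad, being
# defined in the same script):
from ij import IJ

imp = IJ.getImage()  # a multi-frame hyperstack, e.g. opened as a virtual stack
registered = create_registered_hyperstack(imp, "/tmp/registered", 1)
registered.show()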
conFRETProjImpStack = concatStacks(conFRETProjImpStack, FRETProjImp)
conlabelImpStack = concatStacks(conlabelImpStack, labelImp)
thresholdImp.close()
FRETimp2.close()
FRETProjImp.close()
labelImp.close()

# Show the images and make the images pretty... I should have put it in a function
conThresholdImp = ImagePlus("Threshold image for " + originalTitle, conThresholdStack)
conThresholdImp.setDimensions(1, imp1.getNSlices(), imp1.getNFrames())
IJ.setMinAndMax(conThresholdImp, 0, 1)
conThresholdImp.setCalibration(cal)
conThresholdImp = CompositeImage(conThresholdImp, CompositeImage.COMPOSITE)
conThresholdImp.show()

conFRETImp2 = ImagePlus("Emission ratios X1000 of " + originalTitle, conFRETImp2Stack)
conFRETImp2.setDimensions(1, imp1.getNSlices(), imp1.getNFrames())
conFRETImp2.setCalibration(cal)
stats = StackStatistics(conFRETImp2)
conFRETImp2 = CompositeImage(conFRETImp2, CompositeImage.COMPOSITE)
IJ.setMinAndMax(conFRETImp2, 500, 3000)
conFRETImp2.show()
IJ.run("16_colors")

conFRETProjImp = ImagePlus("Max Z projection of emission ratios X1000 of " + originalTitle,
                           conFRETProjImpStack)
from ij import IJ, ImagePlus, ImageListener, CompositeImage
from ij.gui import PointRoi, NonBlockingGenericDialog
from net.imglib2.converter import Converters
from net.imglib2.realtransform import RealViews as RV
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.view import Views
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
import os, sys
from collections import defaultdict
from java.awt import Color

# Grab open image
imp = IJ.getImage()  # Replace with actual IO dialog later
img = CompositeImage(imp)


# Create a listener that, on slice change, updates the ROI
class PointRoiRefresher(ImageListener):
    def __init__(self, imp, nuclei):
        self.imp = imp
        # A map of slice indices and 2D points, over the whole 4D volume
        self.nuclei = defaultdict(list)  # Any query returns at least an empty list
        for frame, coord in nuclei.iteritems():
            self.nuclei[frame] = coord

    def imageOpened(self, imp):
        pass

    def imageClosed(self, imp):
        if imp == self.imp:
def loadStacks():
    IJ.log(" ")
    IJ.log("oib_tiff_converter_ver1.0; 08/24/2018; [email protected]")
    IJ.log("tested with: ImageJ2; FIJI update on 8/24/2018")

    srcDir = DirectoryChooser("Choose directory").getDirectory()
    if not srcDir:
        return
    sId = ".oib"
    #stackType = getChoice("", ["normal stack", "virtual stack"])
    stackType = "normal stack"  # Concatenate seems not to work with virtual
    #filepath = srcDir + "tiff"
    #print(filepath)
    iStack = 0
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            path = os.path.join(root, filename)
            if not (sId in filename):
                continue
            IJ.log("opening " + filename)
            if stackType == "virtual stack":
                IJ.run("Bio-Formats Importer",
                       "open=" + path + " color_mode=Default view=[Standard ImageJ] stack_order=Default use_virtual_stack")
            else:
                IJ.run("Bio-Formats Importer",
                       "open=" + path + " color_mode=Default view=[Standard ImageJ] stack_order=Default")
            # first stack
            if iStack == 0:
                imp = IJ.getImage()
                imp.setTitle(filename + ".tiff")  # in order for the combining to work additively
                nSlices = imp.getNSlices()
                nChannels = imp.getNChannels()
                nTimeFrames = imp.getNFrames()
                imp.setDimensions(nChannels, nSlices, nTimeFrames)
                comp = CompositeImage(imp, CompositeImage.COLOR)
                comp.show()
                fs = FileSaver(comp)
                fs.saveAsTiff(srcDir + filename + "_hyperstack" + ".tiff")
            else:
                imp = IJ.getImage()
                comp = CompositeImage(imp, CompositeImage.COLOR)
                comp.show()
                fs = FileSaver(comp)
                fs.saveAsTiff(srcDir + filename + "_hyperstack" + ".tiff")
            iStack = iStack + 1
            print(iStack)

    #IJ.log("nChannels = " + str(nChannels))
    #IJ.log("nSlices = " + str(nSlices))
    #IJ.log("nFrames = " + str(nTimeFrames*iStack))
    #IJ.run("Stack to Hyperstack...", "order=xyczt(default) channels=" + str(nChannels) + " slices=" + str(nSlices) + " frames=" + str(nTimeFrames) + " display=Color")
    IJ.log("convert back to normal stack: Image...Hyperstacks...Hyperstack to Stack")
    }
  }
""", [Img, Cursor, ArrayList, UnsignedByteType, Views, RandomAccessible,
      RealPoint, NearestNeighborSearchOnKDTree, ArrayImgs, Intervals])

edge_pix1 = w2.findEdgePixels(img1)
kdtree1 = KDTree(edge_pix1, edge_pix1)
search1 = NearestNeighborSearchOnKDTree(kdtree1)
edge_pix2 = w2.findEdgePixels(img2)
kdtree2 = KDTree(edge_pix2, edge_pix2)
search2 = NearestNeighborSearchOnKDTree(kdtree2)

steps = []
for weight in [x / 10.0 for x in xrange(2, 10, 2)]:
    steps.append(w2.makeInterpolatedImage(img1, search1, img2, search2, weight))

vol4d = Views.stack([img1] + steps + [img2])

# Convert 1 -> 255
w2.set1as255(Views.iterable(vol4d).cursor())

imp3 = IL.wrap(vol4d, "interpolations")
imp3.setDimensions(1, vol4d.dimension(2), vol4d.dimension(3))
imp3.setDisplayRange(0, 1)
com = CompositeImage(imp3, CompositeImage.GRAYSCALE)
com.show()
import glob

arguments = docopt(__doc__, version='NER 0.1')
input_file = arguments['INPUT']
output_dir = arguments['OUTPUT']

img = IJ.openImage(input_file)
dims = img.getDimensions()
width = dims[0]
height = dims[1]
num_channels = dims[2]
num_frames = dims[4]  # If it's the T index
if num_frames == 1:
    num_frames = dims[3]  # If it's the Z index
img.setDimensions(num_channels, 1, num_frames)

for i in range(1, num_frames + 1):
    frame = ImageStack(width, height)
    img.setT(i)
    for c in range(1, num_channels + 1):
        img.setC(c)
        ip = img.getProcessor()
        frame.addSlice(ip)
    frame_img = ImagePlus("Frame " + str(i), frame)
    comp = CompositeImage(frame_img, CompositeImage.GRAYSCALE)
    FileSaver(comp).saveAsTiff(output_dir + "/" + str(i).zfill(4) + ".tif")
tracked[i] = 11
print test.labelValues
fp = ShortProcessor(len(tracked), 1, tracked, None)
labelerImp = ImagePlus("labeler", fp)
src2 = clij2.push(labelerImp)

conLabeledStack = ImageStack(imp1.width, imp1.height)
if frames > 1:
    for nFrame in range(1, frames + 1):
        imp3 = extractFrame(imp1, nFrame)
        src = clij2.push(imp3)
        dst = clij2.create(src)
        clij2.replaceIntensities(src, src2, dst)
        LabeledImp = clij2.pull(dst)
        conLabeledStack = concatStacks(conLabeledStack, LabeledImp)

concatLabeledImp = ImagePlus("Labeled " + imageName, conLabeledStack)
ImageConverter.setDoScaling(0)
ImageConverter(concatLabeledImp).convertToGray16()
IJ.setMinAndMax(concatLabeledImp, 0, 255)
concatLabeledImp.setCalibration(imp1.getCalibration())
concatLabeledImp.setDimensions(1, imp1.getNSlices(), imp1.getNFrames())
concatLabeledImp = CompositeImage(concatLabeledImp, CompositeImage.COMPOSITE)
concatLabeledImp.show()
IJ.run("glasbey_on_dark")
labelerImp.close()
    for pattern in keys:
        imps[pattern] = IJ.openImage(ch[pattern][i])

    slices = imps[keys[0]].getNSlices()
    width = imps[keys[0]].getWidth()
    height = imps[keys[0]].getHeight()

    # Create the new image
    stk = ImageStack(width, height)
    for slice in range(1, slices + 1):
        for channel, pattern in enumerate(keys):
            channel += 1  # enumeration from 1
            imps[pattern].setPosition(channel, slice, 1)
            stk.addSlice(None, imps[pattern].getProcessor())
    imp = ImagePlus(merge_name, stk)
    imp.setDimensions(channels, slices, 1)
    comp = CompositeImage(imp, CompositeImage.COLOR)
    # comp.show()
    saver = FileSaver(comp)
    saver.saveAsTiff(output_file)
    log.info('\t\tsaving: %s' % (output_file))

log.info('\tDone.')


if __name__ in ['__builtin__', '__main__']:
    IJ.run("Console", "uiservice=[org.scijava.ui.DefaultUIService [priority = 0.0]]")
    run()
        fixedT = Views.hyperSlice(self.img4d, 3, int((n - 1) / nZ))  # Z blocks
        fixedZ = Views.hyperSlice(fixedT, 2, (n - 1) % nZ)
        w.copy(fixedZ.cursor(), aimg.cursor())
        return aimg.update(None).getCurrentStorageArray()

    def getProcessor(self, n):
        return ShortProcessor(self.dimensions[0], self.dimensions[1],
                              self.getPixels(n), None)


imp = ImagePlus("vol4d", Stack4D(vol4d))
nChannels = 1
nSlices = first.dimension(2)
nFrames = len(timepoint_paths)
imp.setDimensions(nChannels, nSlices, nFrames)

com = CompositeImage(imp, CompositeImage.GRAYSCALE)
com.show()

# Detect nuclei
from net.imglib2.algorithm.dog import DogDetection
from collections import defaultdict
from ij import ImageListener, ImagePlus
from ij.gui import PointRoi
from java.awt import Color
import sys

# Parameters for a Difference of Gaussian to detect nuclei positions
calibration = [1.0 for i in range(vol4d.numDimensions())]  # no calibration: identity
sigmaSmaller = 2.5  # in pixels: a quarter of the radius of a neuron nucleus
sigmaLarger = 5  # in pixels: half the radius of a neuron nucleus
thresholdImp.setCalibration(cal)
thresholdImp.setTitle("Binary mask of " + originalTitle)

# add the images to the concatenated stacks
conThresholdStack = concatStacks(conThresholdStack, thresholdImp)
conlabelImpStack = concatStacks(conlabelImpStack, labelImp)
table = quantify(gfx4, gfx5, table, nFrame, originalTitle)
thresholdImp.close()
labelImp.close()
IJ.log("Processing timeframe: " + str(nFrame))

table.show("Results of " + originalTitle)

# Show the images and make the images pretty... I should have put it in a function
conThresholdImp = ImagePlus("Threshold image for " + originalTitle, conThresholdStack)
conThresholdImp.setDimensions(1, imp1.getNSlices(), imp1.getNFrames())
IJ.setMinAndMax(conThresholdImp, 0, 1)
conThresholdImp.setCalibration(cal)
conThresholdImp = CompositeImage(conThresholdImp, CompositeImage.COMPOSITE)
conThresholdImp.show()

conlabelImp = ImagePlus("Label map " + originalTitle, conlabelImpStack)
conlabelImp.setDimensions(1, imp1.getNSlices(), imp1.getNFrames())
conlabelImp.setCalibration(cal)
stats = StackStatistics(conlabelImp)
conlabelImp = CompositeImage(conlabelImp, CompositeImage.COMPOSITE)
IJ.setMinAndMax(conlabelImp, 0, stats.max)
conlabelImp.show()
IJ.run("glasbey_inverted")
def register_hyperstack_subpixel(imp, channel, shifts, target_folder, virtual):
    """ Takes the imp, determines the x,y,z drift for each pair of time points,
    using the preferred given channel, and outputs as a hyperstack.
    The shifted image is computed using TransformJ, allowing for sub-pixel shifts using interpolation.
    This is quite a bit slower than just shifting the image by full pixels as done in the function
    register_hyperstack() above. However, it significantly improves the result by removing pixel jitter.
    """
    # Compute bounds of the new volume,
    # which accounts for all translations:
    minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
    # Make shifts relative to new canvas dimensions
    # so that the min values become 0,0,0
    for shift in shifts:
        shift.x -= minx
        shift.y -= miny
        shift.z -= minz
    # new canvas dimensions:
    width = int(imp.width + maxx - minx)
    height = int(maxy - miny + imp.height)
    slices = int(maxz - minz + imp.getNSlices())

    print "New dimensions:", width, height, slices

    # prepare stack for final results
    stack = imp.getStack()
    if virtual is True:
        names = []
    else:
        registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())

    # prepare empty slice for padding
    empty = imp.getProcessor().createProcessor(width, height)

    IJ.showProgress(0)

    # get raw data as stack
    stack = imp.getStack()

    # loop across frames
    for frame in range(1, imp.getNFrames() + 1):
        IJ.showProgress(frame / float(imp.getNFrames() + 1))
        fr = "t" + zero_pad(frame, len(str(imp.getNFrames())))  # for saving files in a virtual stack

        # get and report current shift
        shift = shifts[frame - 1]
        print "frame", frame, "correcting drift", -shift.x - minx, -shift.y - miny, -shift.z - minz
        IJ.log(" frame " + str(frame) + " correcting drift " + str(round(-shift.x - minx, 2)) + "," +
               str(round(-shift.y - miny, 2)) + "," + str(round(-shift.z - minz, 2)))

        # loop across channels
        for ch in range(1, imp.getNChannels() + 1):
            tmpstack = ImageStack(width, height, imp.getProcessor().getColorModel())

            # get all slices of this channel and frame
            for s in range(1, imp.getNSlices() + 1):
                ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
                ip2 = ip.createProcessor(width, height)  # potentially larger
                ip2.insert(ip, 0, 0)
                tmpstack.addSlice("", ip2)

            # Pad the end (in z) of this channel and frame
            for s in range(imp.getNSlices(), slices):
                tmpstack.addSlice("", empty)

            # subpixel translation
            imp_tmpstack = ImagePlus("", tmpstack)
            imp_translated = translate_single_stack_using_imglib2(imp_tmpstack, shift.x, shift.y, shift.z)

            # Add translated frame to final time-series
            translated_stack = imp_translated.getStack()
            for s in range(1, translated_stack.getSize() + 1):
                ss = "_z" + zero_pad(s, len(str(slices)))
                # duplicate is important as otherwise it will only be a reference that can change its content
                ip = translated_stack.getProcessor(s).duplicate()
                if virtual is True:
                    name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) + ".tif"
                    names.append(name)
                    currentslice = ImagePlus("", ip)
                    currentslice.setCalibration(imp.getCalibration().copy())
                    currentslice.setProperty("Info", imp.getProperty("Info"))
                    FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
                else:
                    registeredstack.addSlice("", ip)

    IJ.showProgress(1)

    if virtual is True:
        # Create virtual hyper stack with the result
        registeredstack = VirtualStack(width, height, None, target_folder)
        for name in names:
            registeredstack.addSlice(name)
        registeredstack_imp = ImagePlus("registered time points", registeredstack)
        registeredstack_imp.setDimensions(imp.getNChannels(), slices, imp.getNFrames())
        registeredstack_imp.setCalibration(imp.getCalibration().copy())
        registeredstack_imp.setOpenAsHyperStack(True)
    else:
        registeredstack_imp = ImagePlus("registered time points", registeredstack)
        registeredstack_imp.setCalibration(imp.getCalibration().copy())
        registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
        registeredstack_imp.setDimensions(imp.getNChannels(), slices, imp.getNFrames())
        registeredstack_imp.setOpenAsHyperStack(True)

    if 1 == registeredstack_imp.getNChannels():
        return registeredstack_imp
    #IJ.log("\nHyperstack dimensions: time frames:" + str(registeredstack_imp.getNFrames()) + ", slices: " + str(registeredstack_imp.getNSlices()) + ", channels: " + str(registeredstack_imp.getNChannels()))

    # Else, as composite
    mode = CompositeImage.COLOR
    if isinstance(imp, CompositeImage):
        mode = imp.getMode()
    else:
        return registeredstack_imp
    return CompositeImage(registeredstack_imp, mode)
def create_registered_hyperstack(imp, channel, target_folder, virtual):
    """ Takes the imp, determines the x,y,z drift for each pair of time points,
    using the preferred given channel, and outputs as a hyperstack."""
    shifts = compute_frame_translations(imp, channel)
    # Make shifts relative to 0,0,0 of the original imp:
    shifts = concatenate_shifts(shifts)
    print "shifts concatenated:"
    for s in shifts:
        print s.x, s.y, s.z
    # Compute bounds of the new volume,
    # which accounts for all translations:
    minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
    # Make shifts relative to new canvas dimensions
    # so that the min values become 0,0,0
    for shift in shifts:
        shift.x -= minx
        shift.y -= miny
        shift.z -= minz
    print "shifts relative to new dimensions:"
    for s in shifts:
        print s.x, s.y, s.z
    # new canvas dimensions:
    width = imp.width + maxx - minx
    height = maxy - miny + imp.height
    slices = maxz - minz + imp.getNSlices()
    print "New dimensions:", width, height, slices
    # Prepare empty slice to pad in Z when necessary
    empty = imp.getProcessor().createProcessor(width, height)
    # if it's RGB, fill the empty slice with blackness
    if isinstance(empty, ColorProcessor):
        empty.setValue(0)
        empty.fill()
    # Write all slices to files:
    stack = imp.getStack()
    if virtual is False:
        registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())
    names = []
    for frame in range(1, imp.getNFrames() + 1):
        shift = shifts[frame - 1]
        fr = "t" + zero_pad(frame, len(str(imp.getNFrames())))
        # Pad with empty slices before reaching the first slice
        for s in range(shift.z):
            ss = "_z" + zero_pad(s + 1, len(str(slices)))  # slices start at 1
            for ch in range(1, imp.getNChannels() + 1):
                name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) + ".tif"
                names.append(name)
                if virtual is True:
                    currentslice = ImagePlus("", empty)
                    currentslice.setCalibration(imp.getCalibration().copy())
                    currentslice.setProperty("Info", imp.getProperty("Info"))
                    FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
                else:
                    empty = imp.getProcessor().createProcessor(width, height)
                    registeredstack.addSlice(str(name), empty)
        # Add all proper slices
        stack = imp.getStack()
        for s in range(1, imp.getNSlices() + 1):
            ss = "_z" + zero_pad(s + shift.z, len(str(slices)))
            for ch in range(1, imp.getNChannels() + 1):
                ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
                ip2 = ip.createProcessor(width, height)  # potentially larger
                ip2.insert(ip, shift.x, shift.y)
                name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) + ".tif"
                names.append(name)
                if virtual is True:
                    currentslice = ImagePlus("", ip2)
                    currentslice.setCalibration(imp.getCalibration().copy())
                    currentslice.setProperty("Info", imp.getProperty("Info"))
                    FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
                else:
                    registeredstack.addSlice(str(name), ip2)
        # Pad the end
        for s in range(shift.z + imp.getNSlices(), slices):
            ss = "_z" + zero_pad(s + 1, len(str(slices)))
            for ch in range(1, imp.getNChannels() + 1):
                name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) + ".tif"
                names.append(name)
                if virtual is True:
                    currentslice = ImagePlus("", empty)
                    currentslice.setCalibration(imp.getCalibration().copy())
                    currentslice.setProperty("Info", imp.getProperty("Info"))
                    FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
                else:
                    registeredstack.addSlice(str(name), empty)
    if virtual is True:
        # Create virtual hyper stack with the result
        registeredstack = VirtualStack(width, height, None, target_folder)
        for name in names:
            registeredstack.addSlice(name)
        registeredstack_imp = ImagePlus("registered time points", registeredstack)
        registeredstack_imp.setDimensions(imp.getNChannels(),
                                          len(names) / (imp.getNChannels() * imp.getNFrames()),
                                          imp.getNFrames())
        registeredstack_imp.setCalibration(imp.getCalibration().copy())
        registeredstack_imp.setOpenAsHyperStack(True)
    else:
        registeredstack_imp = ImagePlus("registered time points", registeredstack)
        registeredstack_imp.setCalibration(imp.getCalibration().copy())
        registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
        registeredstack_imp.setDimensions(imp.getNChannels(),
                                          len(names) / (imp.getNChannels() * imp.getNFrames()),
                                          imp.getNFrames())
        registeredstack_imp.setOpenAsHyperStack(True)
    if 1 == registeredstack_imp.getNChannels():
        return registeredstack_imp
    IJ.log("\nHyperstack dimensions: time frames:" + str(registeredstack_imp.getNFrames()) +
           ", slices: " + str(registeredstack_imp.getNSlices()) +
           ", channels: " + str(registeredstack_imp.getNChannels()))
    # Else, as composite
    mode = CompositeImage.COLOR
    if isinstance(imp, CompositeImage):
        mode = imp.getMode()
    else:
        return registeredstack_imp
    return CompositeImage(registeredstack_imp, mode)
if auto_thresh:
    con = ImageConverter(image)
    con.convertToGray8()
    IJ.run(image, "Auto Local Threshold",
           "method=Bernsen radius=15 parameter_1=0 parameter_2=0 white stack")
    #image = CompositeImage(image_two)
    #image = IJ.getImage()

z_slices = image.getDimensions()[3] / 2
print("order=xyczt(default) channels=2 slices=" + str(z_slices) + " frames=1 display=Color",
      image.getDimensions())
image_two = HyperStackConverter.toHyperStack(image, 2, z_slices, 1)
image = CompositeImage(image_two)
image.show()

rt = run_comdet(image)
rt.save(directory + "/" + filename + "_results.csv")
image = IJ.getImage()

if auto_cell:
    mask = generate_mask(image_green, auto_cell_thresh)
    fs = FileSaver(mask)
    filepath = directory + "/" + filename + "_mask.tiff"
    fs.saveAsTiff(filepath)
    rest = ResultsTable.open(directory + "/" + filename + "_results.csv")