def main(*R):
    """Run the Heterogeneous Z Correction on the current image.

    Builds an optical-model correction stack from per-material
    parameters R (loaded from preferences, confirmed via dialog)
    and displays it with the calibration of the source image.
    """
    # Show initial progress bar
    IJ.showProgress(0, 50)
    IJ.showStatus("Heterogeneous Z Correction")
    # Retrieve the values R (with default values) from previous attempts.
    # NOTE(review): eval() of a stored preference string — acceptable only
    # because the preference is written by this script itself.
    Rstr = "[(-0.05977, 83.3, 78.73),(-0.05976, 41.65, 39.36)]"
    R = eval(Prefs.get("HetZCorr.R", Rstr))
    # Load current image and get infos
    imp = IJ.getImage()
    stk = imp.getStack()
    (sx, sy, sz) = stackSize(stk)
    cal = imp.getCalibration()
    # Get unique values
    val = getUniqueValues(stk)
    # Get R from dialog
    R = showDialogR(R, val)
    # Generate optical model
    correction = generateModel(stk, R, val)
    # BUGFIX: copy the calibration onto the newly generated correction image.
    # The original called imp.setCalibration(cal), which was a no-op since
    # cal was read from imp in the first place.
    correction.setCalibration(cal)
    # Show model image
    correction.show()
    correction.setSlice(sz)
    IJ.run(correction, "Enhance Contrast", "saturated=0.35")
    IJ.run(correction, "Fire", "")
def generateModel(stk, R, val):
    """Generate the optical model correction stack.

    For every (x, y) column of the input stack, accumulates an optical
    path length (OPL) per unique voxel value in ``val`` along z, and
    writes f(OPL, R) into a new 32-bit "Correction" image of the same
    dimensions.
    """
    (sx, sy, sz) = stackSize(stk)
    # Initialize correction output and temporary OPL
    cor = IJ.createImage("Correction", "32-bit black", sx, sy, sz)
    corstk = cor.getStack()
    # NOTE(review): this first assignment is dead code — 0 * [len(val)] is
    # the empty list and is immediately overwritten on the next line.
    OPL = 0 * [len(val)]
    OPL = [0 for i in range(len(val))]
    # NOTE(review): lastOPL aliases OPL (same list object), so the
    # "lastOPL[ii] = OPL[ii]" below is a no-op and "lastOPL[ii] + 1" reads
    # OPL's own previous value. The code still behaves correctly because
    # the zz == 0 branches re-seed every entry at the top of each column,
    # but the aliasing is fragile — TODO confirm before refactoring.
    lastOPL = OPL
    for xx in range(sx):
        for yy in range(sy):
            # Pull the whole z column at (xx, yy) in one call.
            sub = stk.getVoxels(xx, yy, 0, 1, 1, sz, [])
            for zz in range(sz):
                for ii in range(len(val)):
                    if sub[zz] == val[ii]:
                        # Voxel belongs to material ii: extend its path length.
                        if zz >= 1:
                            OPL[ii] = lastOPL[ii] + 1
                        else:
                            OPL[ii] = 1
                    else:
                        # Voxel is another material: carry (or reset) the count.
                        if zz >= 1:
                            OPL[ii] = lastOPL[ii]
                        else:
                            OPL[ii] = 0
                    lastOPL[ii] = OPL[ii]
                # Convert the accumulated path lengths into a correction value.
                vv = f(OPL, R)
                corstk.setVoxel(xx, yy, zz, vv)
        IJ.showProgress(xx, sx)
    return cor
def download_from_url(download_url, target_dir, download_file=None, download_msg=None):
    '''Download a file to a temporary location, then move it to target_dir.

    download_file: target filename; if None it is taken from the
        Content-Disposition header.
    download_msg: status-bar message prefix; defaults to "Downloading: <file>".
    '''
    from ij import IJ
    print(download_url)
    # open url and set up using header information
    u = urllib2.urlopen(download_url)
    headers = u.info()
    download_size = int(headers['Content-Length'])
    print(u)
    print(headers)
    if download_file == None:
        if headers.has_key('Content-Disposition'):
            download_file = re.sub(".*filename=", "", headers['Content-Disposition'])
        else:
            # NOTE(review): IJ.error does not abort — execution continues with
            # download_file still None, which will fail below.
            IJ.error(
                "No filename specified for download and none in http header!")
    if download_msg == None:
        download_msg = 'Downloading: %s' % (download_file)
    # delete=False so the file survives close() until shutil.move below.
    tf = tempfile.NamedTemporaryFile(suffix=download_file, delete=False)
    print 'Downloading ' + download_url + ' to ' + tf.name
    print "Download size should be %d" % (download_size)
    dest_file = os.path.join(target_dir, download_file)
    print 'Destination location %s' % (dest_file)
    # Now for the download
    block_size = 100000
    if download_size > block_size:
        bytes_read = 0
        while bytes_read < download_size:
            IJ.showStatus("%s (%.1f/%.1f Mb)" % (download_msg, (bytes_read / 1000000.0), (download_size / 1000000.0)))
            IJ.showProgress(bytes_read, download_size)
            tf.file.write(u.read(block_size))
            # NOTE(review): the final read may return fewer bytes than
            # block_size, so bytes_read can overshoot; harmless for the
            # loop-exit test but the progress value is approximate.
            bytes_read += block_size
        IJ.showProgress(1.0)
    else:
        tf.file.write(u.read())
    u.close()
    tf.file.close()
    # Python 2 print statement: the % is applied to the string, not to print.
    print('Downloaded file has size %d') % (os.path.getsize(tf.name))
    tf.close()
    shutil.move(tf.name, dest_file)
    IJ.showStatus('Cleaning up!')
def get_drift_matrix(self):
    '''Return an NxN matrix with the drift of every image pair.

    Entry (i, j) is the drift of image j relative to image i, as
    computed by self.get_drift(i, j).
    '''
    n = len(self.images)
    full_progress = n ** 2
    for i in range(n):
        for j in range(n):
            # BUGFIX: force float division. Under Python 2 / Jython the
            # original integer division was always 0, so the progress bar
            # never advanced during the computation.
            IJ.showProgress(float(i * n + j) / full_progress)
            self.drift_matrix[i][j] = self.get_drift(i, j)
    IJ.showProgress(1.0)
    return self.drift_matrix
def rot_around_x(input_stack):
    """Rotate a stack by 90 degrees around the x axis.

    Each image row y of the input becomes one output slice, so the
    former z axis of the input becomes the y axis of the output.
    """
    n_rows = input_stack.getHeight()
    out_w = input_stack.getWidth()
    out_h = input_stack.getSize()
    rotated = ImageStack(out_w, out_h)
    for row in range(n_rows):
        IJ.showProgress(float(row) / n_rows)
        # Grab the full (x, z) plane at this y in a single call.
        plane = input_stack.getVoxels(0, row, 0, out_w, 1, out_h, [])
        rotated.addSlice(FloatProcessor(out_w, out_h, plane))
    IJ.showProgress(1.0)
    return rotated
def concatenateImagePlus(files, outfile): """Concatenate images contained in files and save in outfile""" options = ImporterOptions() options.setId(files[0]) options.setVirtual(1) options.setOpenAllSeries(1) options.setQuiet(1) images = BF.openImagePlus(options) imageG = images[0] nrPositions = len(images) options.setOpenAllSeries(0) nslices = imageG.getNSlices() nframes = len(files) nchannels = imageG.getNChannels() luts = imageG.getLuts() for i in range(0, nrPositions): concatImgPlus = IJ.createHyperStack( "ConcatFile", imageG.getWidth(), imageG.getHeight(), imageG.getNChannels(), imageG.getNSlices(), len(files), imageG.getBitDepth()) concatStack = ImageStack(imageG.getWidth(), imageG.getHeight()) IJ.showStatus("Concatenating files") for file_ in files: try: print '...', basename(file_) options.setSeriesOn(i, 1) options.setId(file_) image = BF.openImagePlus(options)[0] imageStack = image.getImageStack() sliceNr = imageStack.getSize() for j in range(1, sliceNr+1): concatStack.addSlice(imageStack.getProcessor(j)) image.close() options.setSeriesOn(i, 0) except Exception, e: IJ.log("ERROR") IJ.log(file_ + str(e)) raise IJ.showProgress(files.index(file_), len(files)) concatImgPlus.setStack(concatStack, nchannels, nslices, nframes) concatImgPlus.setCalibration(image.getCalibration()) concatImgPlus.setOpenAsHyperStack(True) concatImgPlus.setLuts(luts) concatImgPlus.close() IJ.saveAs(concatImgPlus, "Tiff", outfile)
def get_drift_vector(self):
    '''Return a vector of length N with the cumulative drift of every
    image relative to the first one.
    '''
    full_progress = len(self.images) - 1
    self.drift_vector[0] = (0., 0.)
    for i in range(1, len(self.images)):
        # BUGFIX: force float division. Under Python 2 / Jython the original
        # integer division reported 0 progress until the very last step.
        IJ.showProgress(float(i) / full_progress)
        self.drift_vector[i] = self.get_drift(i - 1, i)
        # Accumulate: drift vs. first image = previous total + local shift.
        self.drift_vector[i] = tuple([a + b for a, b in
                                      zip(self.drift_vector[i - 1],
                                          self.drift_vector[i])])
    IJ.showProgress(1.0)
    return self.drift_vector
def run_script(imp, close, far, cutoff):
    """Background-subtract every slice of ``imp`` and show the result.

    close/far select which subtraction passes run; cutoff is forwarded
    to the subtraction helpers. The output image keeps the input's
    calibration and gets a "_background-subtracted.tif" title.
    """
    in_stack = imp.getImageStack()
    out_stack = ImageStack(imp.width, imp.height)
    n_slices = in_stack.getSize()
    # Walk the stack slice by slice (ImageJ slice indices are 1-based).
    for slice_idx in range(1, n_slices + 1):
        IJ.showProgress(slice_idx, n_slices + 1)
        # Work in float to avoid byte-rounding problems.
        fp = in_stack.getProcessor(slice_idx).convertToFloat()
        pixels = fp.getPixels()
        # First-pass subtraction also yields the noise estimate.
        stddev = initial_subtract(pixels, imp.width, imp.height)
        mask = [255] * (imp.width * imp.height)
        # Mask image borders depending on the selected mode.
        if far:
            fringe_mask(mask, imp.width, 2)
        elif close:
            fringe_mask(mask, imp.width, 1)
        # Run the requested subtraction passes, refreshing the mask first.
        if close:
            init_mask(pixels, mask)
            close_subtract(pixels, mask, imp.width, cutoff, stddev)
        if far:
            init_mask(pixels, mask)
            far_subtract(pixels, mask, imp.width, cutoff, stddev)
        if close or far:
            apply_mask(pixels, mask)
        out_stack.addSlice(None, FloatProcessor(imp.width, imp.height, pixels, None))
    result = ImagePlus(imp.title[:-4] + "_background-subtracted.tif", out_stack)
    # Keep the same image calibration as the input.
    result.setCalibration(imp.getCalibration().copy())
    result.show()
    IJ.showProgress(1)
def create_images(self, count=3, mode='simple'):
    '''Create N images to test the drift correction.

    :param count: the number of images to create (default: 3)
    :param mode: the type of images to create
        (options: 'simple', 'complex'; default: 'simple')
    '''
    IJ.showProgress(0)
    # Start from an empty image list on every call.
    if self.images:
        self.images = []
    builders = {'simple': self._create_simple_images,
                'complex': self._create_complex_images}
    if mode in builders:
        builders[mode](count)
    else:
        # Unknown mode: nothing to build, just finish the progress bar.
        IJ.showProgress(1)
def concatenateImagePlus(files, outfile):
    """concatenate images contained in files and save in outfile"""
    # Disabled early-exit for single-file input, kept for reference:
    '''
    if len(files) == 1:
        IJ.log(files[0] + " has only one time point! Nothing to concatenate!")
        return
    '''
    options = ImporterOptions()
    options.setId(files[0])
    options.setVirtual(1)
    options.setOpenAllSeries(1)
    options.setQuiet(1)
    images = BF.openImagePlus(options)
    imageG = images[0]
    # One ImagePlus per series = one position in a multi-position file.
    nrPositions = len(images)
    options.setOpenAllSeries(0)
    for i in range(0, nrPositions):
        concatImgPlus = IJ.createHyperStack("ConcatFile", imageG.getWidth(),
            imageG.getHeight(), imageG.getNChannels(), imageG.getNSlices(),
            len(files), imageG.getBitDepth())
        concatStack = ImageStack(imageG.getWidth(), imageG.getHeight())
        IJ.showStatus("Concatenating files")
        # NOTE(review): loop variable shadows the Python 2 builtin `file`.
        for file in files:
            try:
                IJ.log(" Add file " + file)
                options.setSeriesOn(i,1)
                options.setId(file)
                image = BF.openImagePlus(options)[0]
                imageStack = image.getImageStack()
                sliceNr = imageStack.getSize()
                for j in range(1, sliceNr+1):
                    concatStack.addSlice(imageStack.getProcessor(j))
                image.close()
                options.setSeriesOn(i,0)
            # NOTE(review): bare except swallows all errors (after logging);
            # a failed file leaves a gap in the concatenated stack.
            except:
                traceback.print_exc()
                IJ.log(file + " failed to concatenate!")
            IJ.showProgress(files.index(file), len(files))
        # NOTE(review): setStack without channel/slice/frame counts — unlike
        # the sibling variant of this function, the result is not re-shaped
        # into a hyperstack here; confirm this is intended.
        concatImgPlus.setStack(concatStack)
        concatImgPlus.setCalibration(image.getCalibration())
        if len(images) > 1:
            # Multi-position input: encode the position in the filename.
            outfileP = addPositionName(i+1,outfile)
            IJ.saveAs(concatImgPlus, "Tiff", outfileP)
        else:
            IJ.saveAs(concatImgPlus, "Tiff", outfile)
        concatImgPlus.close()
def _create_simple_images(self, count):
    '''Create ``count`` simple test images.

    The first image is the reference; each further image contains the
    same rectangle shifted by a random offset, and the offset is encoded
    in the image title as "dx,dy".
    '''
    width, height = self.dim_simple
    ref = IJ.createImage('Ref', '32-bit black', width, height, 1)
    self._create_rect_and_noise(ref, self.roi_simple)
    self.images.append(ref)
    # BUGFIX: float division — the Py2 integer division reported 0 progress.
    IJ.showProgress(1.0 / count)
    from random import randint
    for i in range(2, count + 1):
        roi_off = list(self.roi_simple[:])
        # int() keeps the bounds valid for randint when 0.5*value is odd.
        roi_off[0] = randint(int(0.5 * self.roi_simple[0]),
                             int(1.5 * self.roi_simple[0]))
        # BUGFIX: the y offset was derived from roi_simple[0] (the x
        # coordinate) — use roi_simple[1], matching _create_complex_images.
        roi_off[1] = randint(int(0.5 * self.roi_simple[1]),
                             int(1.5 * self.roi_simple[1]))
        imp = IJ.createImage('Img', '32-bit black', width, height, 1)
        self._create_rect_and_noise(imp, roi_off)
        # Encode the true shift in the title for later verification.
        imp.setTitle('%d,%d' % (roi_off[0] - self.roi_simple[0],
                                roi_off[1] - self.roi_simple[1]))
        self.images.append(imp)
        IJ.showProgress(float(i) / count)
def download_and_untar_url(download_url,target_dir,untarfun,download_file=None, download_msg=None): ''' download file in temporary location untar to target_dir using untarfun clean up download ''' # open url and set up using header information u = urllib2.urlopen(download_url) headers = u.info() download_size = int(headers['Content-Length']) if download_file == None: if headers.has_key('Content-Disposition'): download_file = re.sub(".*filename=","",headers['Content-Disposition']) else: myErr("No filename specified for download and none in http header!") if download_msg==None: download_msg='Downloading: %s' % (download_file) tf=tempfile.NamedTemporaryFile(suffix=download_file) print 'Downloading '+download_url+' to '+ tf.name print "Download size should be %d" % (download_size) from ij import IJ # Now for the download block_size=100000 if download_size>block_size: bytes_read=0 while bytes_read<download_size: IJ.showStatus("%s (%.1f/%.1f Mb)" % (download_msg,(bytes_read/1000000.0),(download_size/1000000.0))) IJ.showProgress(bytes_read,download_size) tf.file.write(u.read(block_size)) bytes_read+=block_size IJ.showProgress(1.0) else: tf.file.write(u.read()) u.close() tf.file.close() print ('Downloaded file has size %d')%(os.path.getsize(tf.name)) untarfun(tf.name,target_dir) IJ.showStatus('Cleaning up!') tf.close()
def _create_complex_images(self, count):
    '''Create ``count`` complex test images cropped from a sample photo.

    The first crop is the reference; each further image is cropped at a
    random offset, with the true shift encoded in the title as "dx,dy".
    '''
    source = IJ.openImage('http://imagej.nih.gov/ij/images/NileBend.jpg')
    IJ.run(source, '32-bit', '')
    ref = self._crop_and_noise(source, self.roi_complex)
    ref.setTitle('Ref')
    self.images.append(ref)
    # BUGFIX: float division — the Py2 integer division reported 0 progress.
    IJ.showProgress(1.0 / count)
    from random import randint
    for i in range(2, count + 1):
        roi_off = list(self.roi_complex[:])
        # int() keeps the bounds valid for randint when 0.5*value is odd.
        roi_off[0] = randint(int(0.5 * self.roi_complex[0]),
                             int(1.5 * self.roi_complex[0]))
        roi_off[1] = randint(int(0.5 * self.roi_complex[1]),
                             int(1.5 * self.roi_complex[1]))
        imp = self._crop_and_noise(source, roi_off)
        # Encode the true shift in the title for later verification.
        imp.setTitle('%d,%d' % (self.roi_complex[0] - roi_off[0],
                                self.roi_complex[1] - roi_off[1]))
        self.images.append(imp)
        IJ.showProgress(float(i) / count)
    source.close()
def __init__(self, pre1, pre2, post):
    '''Prepare the algorithm by passing three images.

    pre1 and pre2 are the pre-edge images used to estimate the power-law
    background. post is the image the estimated background signal is
    subtracted from. The energy loss of each image is parsed from its
    title (e.g. "...250eV...").
    '''
    self.pre1_imp = pre1
    self.pre2_imp = pre2
    self.post_imp = post
    # Matches e.g. "250eV", "250.5eV" or "250,5eV" in an image title.
    eloss_pattern = re.compile(r'(\d+(?:[\.,]\d+)?)eV')
    # NOTE(review): search() returns None when a title has no 'eV' token,
    # which raises AttributeError here; also float() rejects the
    # comma-decimal form the regex allows — confirm titles use '.'.
    self.eloss_pre1 = float(eloss_pattern.search(pre1.getTitle()).group(1))
    self.eloss_pre2 = float(eloss_pattern.search(pre2.getTitle()).group(1))
    self.eloss_post = float(eloss_pattern.search(post.getTitle()).group(1))
    print('Energy-losses extracted form image titles: %deV, %deV and %deV'
          % (self.eloss_pre1, self.eloss_pre2, self.eloss_post))
    # There are 5 repetitive called functions where _update_progress() is called.
    self.full_progress = 5 * post.getWidth() * post.getHeight() - 1
    self.progress = 0
    IJ.showProgress(0, self.full_progress)
def stack_median_filter(imp, radius_um=5.0):
    """perform 2D median filtering slicewise on a stack

    Filters the stack in place, retitles the image, and returns it.
    NOTE(review): radius_um is passed directly to RankFilters.rank, which
    works in pixels — there is no calibration conversion here; confirm the
    parameter is really meant in microns.
    """
    t1 = datetime.now();
    stack = imp.getStack();
    title = imp.getTitle();
    n_z = imp.getNSlices();
    # Update the status line roughly every 5 % of slices.
    progress_inc = 1 if n_z/20 < 1 else n_z/20;
    filt = RankFilters();
    for idx in range(n_z):
        #print("Median filtering stack, {} % complete...".format(100 * round(float(zidx)/n_z, 3)));
        # ImageJ slice indices are 1-based.
        ip = stack.getProcessor(idx+1);
        filt.rank(ip, radius_um, RankFilters.MEDIAN);
        IJ.showProgress(float(idx)/n_z);
        if idx%progress_inc==0:
            IJ.showStatus("Median filtering stack in 2D: {}/{}".format(idx, n_z));
    imp.updateAndDraw();
    imp.setTitle("Median filtered (r={}) {}".format(radius_um, imp.getTitle()).replace(".tif", ""));
    t2 = datetime.now();
    IJ.showProgress(1.0);
    print("Median filtering took {} s".format((t2-t1).total_seconds()));
    return imp;
def compute_frame_translations(imp, channel, frames = None):
    """Compute the X,Y,Z translation between successive time points of a
    hyper virtual stack, using the given preferred channel.

    frames: optional list of 1-based frame indices; defaults to all frames.
    Returns (shifts, frames) — one shift per processed frame, the first
    being zero since the first frame is its own reference.
    """
    if frames is None:
        frames = range(1, imp.getNFrames()+1)
    previous = extract_frame(imp, frames[0], channel)
    # The first frame is the reference: zero shift.
    shifts = [Point3i(0, 0, 0)]
    IJ.showProgress(0)
    total = float(imp.getNFrames())
    for step in range(1, len(frames)):
        t = frames[step]
        IJ.log("Load frame " + str(t) + " up to " + str(frames[-1]))
        current = extract_frame(imp, t, channel)
        IJ.log("Compute difference " + str(t) + " to " + str(frames[0]))
        # Each shift is measured against the immediately preceding frame.
        shifts.append(compute_stitch(ImagePlus("1", previous), ImagePlus("2", current)))
        previous = current
        IJ.showProgress(step / total)
    IJ.showProgress(1)
    return shifts, frames
def compute_and_update_frame_translations_dt(imp, dt, options, shifts = None):
    """Compute the X,Y,Z translation between every t and t+dt time point of
    a hyper virtual stack, using the channel/processing given in options.
    If shifts were already determined at another (lower) dt they are used
    as the starting point and updated in place.
    """
    nt = imp.getNFrames()
    # get roi (could be None)
    roi = imp.getRoi()
    #if roi:
    #    print "ROI is at", roi.getBounds()
    # init shifts
    if shifts == None:
        shifts = []
        for t in range(nt):
            shifts.append(Point3f(0,0,0))
    # compute shifts
    IJ.showProgress(0)
    max_shifts = options['max_shifts']
    for t in range(dt, nt+dt, dt):
        # together with above range till nt+dt this ensures that the last
        # data points are not missed out
        if t > nt-1:
            t = nt-1  # nt-1 is the last shift (0-based)
        IJ.log(" between frames "+str(t-dt+1)+" and "+str(t+1))
        # get (cropped and processed) image at t-dt
        roi1 = shift_roi(imp, roi, shifts[t-dt])
        imp1 = extract_frame_process_roi(imp, t+1-dt, roi1, options)
        # get (cropped and processed) image at t
        roi2 = shift_roi(imp, roi, shifts[t])
        imp2 = extract_frame_process_roi(imp, t+1, roi2, options)
        #if roi:
        #    print "ROI at frame",t-dt+1,"is",roi1.getBounds()
        #    print "ROI at frame",t+1,"is",roi2.getBounds()
        # compute shift between the two (already pre-shifted) crops
        local_new_shift = compute_shift(imp2, imp1)
        limit_shifts_to_maximal_shifts(local_new_shift, max_shifts)
        if roi:
            # total shift is shift of rois plus measured drift
            #print "correcting measured drift of",local_new_shift,"for roi shift:",shift_between_rois(roi2, roi1)
            local_new_shift = add_Point3f(local_new_shift, shift_between_rois(roi2, roi1))
        # determine the shift that we knew already
        local_shift = subtract_Point3f(shifts[t],shifts[t-dt])
        # compute difference between new and old measurement (which come from different dt)
        add_shift = subtract_Point3f(local_new_shift,local_shift)
        #print "++ old shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),local_shift.x,local_shift.y,local_shift.z)
        #print "++ add shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),add_shift.x,add_shift.y,add_shift.z)
        # update shifts from t-dt to the end (assuming that the measured
        # local shift will persist till the end)
        for i,tt in enumerate(range(t-dt,nt)):
            # for i>dt below expression basically is a linear drift prediction
            # for the frames at tt>t; this is only important for predicting
            # the best shift of the ROI — the drifts for i>dt will be
            # corrected by the next measurements
            shifts[tt].x += 1.0*i/dt * add_shift.x
            shifts[tt].y += 1.0*i/dt * add_shift.y
            shifts[tt].z += 1.0*i/dt * add_shift.z
            #print "updated shift till frame",tt+1,"is",shifts[tt].x,shifts[tt].y,shifts[tt].z
        IJ.showProgress(1.0*t/(nt+1))
    IJ.showProgress(1)
    return shifts
def get_result(self):
    '''Return the ImagePlus objects that represent the elemental map,
    the SNR and the maps of the parameters r and ln(a).

    If the calculation of results was not done before, this method
    invokes the calculation.
    '''
    if not self.sigs:
        self.calc_map()
    width = self.post_imp.getWidth()
    height = self.post_imp.getHeight()

    def as_image(title, values):
        # Wrap a flat float sequence into a new ImagePlus of post's size.
        return ImagePlus(title, FloatProcessor(width, height, array(values, 'f')))

    lnA_imp = as_image('Map of parameter ln(a)', self.lnAs)
    r_imp = as_image('Map of parameter r', self.rs)
    sig_imp = as_image('Elemental map %deV - %s'
                       % (self.eloss_post, self.post_imp.getShortTitle()),
                       self.sigs)
    if not self.snrs:
        self.calc_snr()
    snr_imp = as_image('SNR %deV - %s'
                       % (self.eloss_post, self.post_imp.getShortTitle()),
                       self.snrs)
    IJ.showProgress(1.0)
    return sig_imp, snr_imp, r_imp, lnA_imp
def depthmap(stack):
    """Build a max-filtered copy of a single-channel z stack.

    Each slice is converted to a FloatProcessor, run through maxfilter,
    and added to the output stack, which is returned.
    """
    width = stack.getWidth()
    height = stack.getHeight()
    # Loop through slices in stack.
    size = stack.getSize()
    outstack = ImageStack()
    IJ.log("size: {}".format(size))
    # BUGFIX: ImageJ slice indices are 1-based and inclusive of `size`;
    # the original range(1, size) silently dropped the last slice.
    for z in range(1, size + 1):
        # Calculate maxfilter on a float copy of the slice's pixels.
        pixels = [i for i in stack.getPixels(z)]
        fp = FloatProcessor(width, height, pixels)
        outstack.addSlice(maxfilter(fp))
        IJ.showProgress(1.0 * z / size)
    # Return output stack.
    return outstack
def robust_convertStackToGrayXbit(imp, x=8, normalise=False):
    """simplified from https://github.com/imagej/imagej1/blob/master/ij/process/StackConverter.java,
    avoiding complications of scaling based on LUT taken from the current
    frame. Assumes conversion from grayscale image.

    x: target bit depth (8, 16 or 32). normalise: stretch the full stack
    range to the target range before converting. Converts in place and
    returns imp.
    """
    if x!=8 and x!=16 and x!=32:
        raise NotImplementedError("can't convert to the specified bit depth");
    # Already at the requested depth: nothing to do.
    if imp.getBitDepth()==x:
        return imp;
    if normalise:
        # Map [min, max] of the whole stack onto [0, 2**x - 1] in place.
        stats = StackStatistics(imp);
        offset = stats.min;
        maxtomin = stats.max - stats.min;
        IJ.run(imp, "Subtract...", "value={} stack".format(offset));
        IJ.run(imp, "Multiply...", "value={} stack".format(float(2**x - 1)/maxtomin));
    current_slice = imp.getCurrentSlice();
    stack = imp.getStack();
    out_stack = ImageStack(imp.getWidth(), imp.getHeight());
    n_z = imp.getNSlices();
    # Update the status line roughly every 5 % of slices.
    progress_inc = 1 if n_z/20 < 1 else n_z/20;
    for idx in range(n_z):
        # always use 1 as old stack index since we remove a slice at each
        # iteration — this keeps peak memory down (destructive on `stack`).
        label = stack.getSliceLabel(1);
        ip = stack.getProcessor(1);
        stack.deleteSlice(1);
        # False = no per-slice scaling; any scaling happened above.
        if x==8:
            out_stack.addSlice(label, ip.convertToByte(False));
        elif x==16:
            out_stack.addSlice(label, ip.convertToShort(False));
        elif x==32:
            out_stack.addSlice(label, ip.convertToFloat());
        IJ.showProgress(float(idx)/n_z);
        if idx%progress_inc==0:
            IJ.showStatus("Converting stack to {}-bits: {}/{}".format(x, idx, n_z));
    imp.setStack(out_stack);
    IJ.showProgress(1.0);
    imp.setSlice(current_slice);
    return imp;
def compute_and_update_frame_translations_dt(imp, channel, dt, process, shifts = None):
    """Compute the X,Y,Z translation between every t and t+dt time point of
    a hyper virtual stack, using the given preferred channel. If shifts
    were already determined at another (lower) dt they are used as the
    starting point and updated in place.
    """
    nt = imp.getNFrames()
    # get roi (could be None)
    roi = imp.getRoi()
    if roi:
        print "ROI is at", roi.getBounds()
    # init shifts
    if shifts == None:
        shifts = []
        for t in range(nt):
            shifts.append(Point3f(0,0,0))
    # compute shifts
    IJ.showProgress(0)
    for t in range(dt, nt+dt, dt):
        # together with above range till nt+dt this ensures that the last
        # data points are not missed out
        if t > nt-1:
            t = nt-1  # nt-1 is the last shift (0-based)
        IJ.log(" between frames "+str(t-dt+1)+" and "+str(t+1))
        # get (cropped and processed) image at t-dt
        roi1 = shift_roi(imp, roi, shifts[t-dt])
        imp1 = extract_frame_process_roi(imp, t+1-dt, channel, process, roi1)
        # get (cropped and processed) image at t
        roi2 = shift_roi(imp, roi, shifts[t])
        imp2 = extract_frame_process_roi(imp, t+1, channel, process, roi2)
        if roi:
            print "ROI at frame",t-dt+1,"is",roi1.getBounds()
            print "ROI at frame",t+1,"is",roi2.getBounds()
        # compute shift between the two (already pre-shifted) crops
        local_new_shift = compute_stitch(imp2, imp1)
        if roi:
            # total shift is shift of rois plus measured drift
            print "correcting measured drift of",local_new_shift,"for roi shift:",shift_between_rois(roi2, roi1)
            local_new_shift = add_Point3f(local_new_shift, shift_between_rois(roi2, roi1))
        # determine the shift that we knew already
        local_shift = subtract_Point3f(shifts[t],shifts[t-dt])
        # compute difference between new and old measurement (which come from different dt)
        add_shift = subtract_Point3f(local_new_shift,local_shift)
        print "++ old shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),local_shift.x,local_shift.y,local_shift.z)
        print "++ add shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),add_shift.x,add_shift.y,add_shift.z)
        # update shifts from t-dt to the end (assuming that the measured
        # local shift will persist till the end)
        for i,tt in enumerate(range(t-dt,nt)):
            # for i>dt below expression basically is a linear drift prediction
            # for the frames at tt>t; this is only important for predicting
            # the best shift of the ROI — the drifts for i>dt will be
            # corrected by the next measurements
            shifts[tt].x += 1.0*i/dt * add_shift.x
            shifts[tt].y += 1.0*i/dt * add_shift.y
            shifts[tt].z += 1.0*i/dt * add_shift.z
            print "updated shift till frame",tt+1,"is",shifts[tt].x,shifts[tt].y,shifts[tt].z
        IJ.showProgress(1.0*t/(nt+1))
    IJ.showProgress(1)
    return shifts
def compute_frame_translations(imp, channel):
    """Compute the X,Y,Z translation between every pair of successive time
    points of a hyper virtual stack, using the given preferred channel.

    Returns one shift per frame; the first is zero since frame 1 is its
    own reference.
    """
    previous = extract_frame(imp, 1, channel)
    # Frame 1 is the reference: zero shift.
    shifts = [Point3i(0, 0, 0)]
    IJ.showProgress(0)
    total = float(imp.getNFrames())
    for t in range(2, imp.getNFrames() + 1):
        current = extract_frame(imp, t, channel)
        # Each shift is measured against the immediately preceding frame.
        shifts.append(compute_stitch(ImagePlus("1", previous), ImagePlus("2", current)))
        previous = current
        IJ.showProgress((t - 1) / total)
    IJ.showProgress(1)
    return shifts
IJ.log("stack dimension error!") #next step: measure the mean of each frame means = [] if isFrames: size = imp.getNFrames() else: size = imp.getNSlices() for i in xrange(1, size+1): #get imageprocessor for slice ip = stack.getProcessor(i) #show progress! IJ.showProgress(i, size+1) #find the mean using the getMean function, then append it to the list mean = getMean(ip, imp) means.append(mean) IJ.showProgress(1) IJ.resetMinAndMax() #set up the variables for plotting and then plot! x = xrange(1, size + 1) y = means plot = Plot("Illumination intensity stability (" + path.basename(stackpath) + ")", "Frame", "Mean frame intensity", [], []) plot.setLineWidth(1)
def __track(self, imgName) :
    """ Tracks all the cells in ONE stack.

    Segments ROIs in the first slice, then links ROIs between every pair
    of consecutive slices, creating/closing Bacteria_Cell records as
    cells appear and disappear. Results are stored in self.__dict[imgName].
    """
    # Snapshot the option flags so they can be restored at the end.
    tobool = [self.__subback, self.__manthresh, self.__optionAngle, self.__optionNewCells, self.__optionTimelapse]
    self.__subback, self.__manthresh, self.__optionAngle, self.__optionNewCells, self.__optionTimelapse = map(bool, tobool)
    os.makedirs(self.__pathdir, mode=0777)
    imp=self.__dictImages[imgName]
    # Work in raw pixel units.
    IJ.run(imp, "Set Scale...", "distance=0 known=0 pixel=1 unit=pixel")
    self.__maxLife=imp.getImageStackSize()
    tempdict={}
    self.__dict[imgName]=tempdict
    # We calculate the rois in the first image.
    RoisA = self.__calRois(imp,1)
    # we add the rois found in the first image in the dictionary of the cells.
    for i in range(len(RoisA)) :
        t="%04i" % (i)
        oldname=RoisA[i].getName()
        #newname=oldname+"->cell"+t
        newname="cell"+t
        cellule = Bacteria_Cell(newname)
        RoisA[i].setName(newname)
        tempdict[newname]=cellule
        cellule.setRoi(RoisA[i],0)
        cellule.setSlideEnd(imp.getImageStackSize())
        cellule.setlistTimes(self.__dictTimeStack[imgName])
    # we look at all pairs of images at t and t+1 in the stack, and we
    # search for connections between ROIs at t and t+1.
    tempstacksize=imp.getImageStackSize()
    for i in range(2,tempstacksize+1) :
        IJ.showProgress(i, tempstacksize)
        liens=[]
        news=[]
        losts=[]
        # ROIs of slice i-1 (index i-2, 0-based) for cells still present.
        RoisA=[cellule.getRoi(i-2) for cellule in self.__dict[imgName].values() if isinstance(cellule.getRoi(i-2),Roi) ]
        RoisB = self.__calRois(imp,i)
        # link returns 3 lists of tuples : one of rois that correspond, one
        # of new rois at a given slide, and one of lost rois at a given slide.
        outlink = link(imp, i-1, i, RoisA,RoisB, self.__distparam, self.__distmethod, self.__optionAngle, self.__nbdigits, self.__optionNewCells)
        liens=outlink[0]
        news=outlink[1]
        losts=outlink[2]
        # we update the tab of rois for the cells for which we found a new
        # ROI in another slide.
        lastindex=0
        for lien in liens :
            celltemp=self.__getCell(imgName, lien[0], i-2)
            celltemp.setRoi(lien[1],i-1)
            tempname=str(celltemp.getName())
            # Recover the numeric suffix after the last "cell" in the name.
            endname=str.rsplit(tempname, "cell", 1)[1]
            lien[1].setName(lien[1].getName()+"cell"+str(endname))
            if int(endname)>lastindex :
                lastindex=int(endname)
        # we create new cells and add them to the dictionary
        count=lastindex+1
        for new in news :
            t="%04i" % (count)
            #new[1].setName(new[1].getName()+"->cell"+t)
            new[1].setName("cell"+t)
            celltemp = Bacteria_Cell("cell"+t)
            tempdict[celltemp.name]=celltemp
            # Pad earlier slides for a cell that appears mid-stack.
            for j in range(i-1) :
                celltemp.setRoi("NOT HERE YET",j)
            celltemp.setRoi(new[1],i-1)
            celltemp.setSlideInit(i)
            celltemp.setSlideEnd(imp.getImageStackSize())
            # NOTE(review): this updates `cellule` (leftover from the first
            # loop) rather than the new `celltemp` — looks like a bug; confirm.
            cellule.setlistTimes(self.__dictTimeStack[imgName])
            count=count+1
        # we complete the tab of rois of the cells that dispear in a slide.
        for lost in losts :
            celltemp=self.__getCell(imgName, lost[1], i-2)
            if celltemp is None :
                continue
            celltemp.setSlideEnd(i-1)
            for j in range(i-1,imp.getImageStackSize()) :
                celltemp.setRoi("LOST",j)
    #if self.__optionSave == True :
    self.__SaveStack(imgName,imp)
    # Restore the original (pre-bool-coerced) option flags.
    self.__subback, self.__manthresh, self.__optionAngle, self.__optionNewCells, self.__optionTimelapse = map(bool, tobool)
def process_imgdir( directory, close_after ) :
    """Process one image directory: crop each cube ROI out of uniform.tif and
    probabilities.tif, mesh/voxelize the probabilities in the 3D viewer,
    flood-fill the vasculature interior, and record EDT and skeleton-EDT
    histograms to CSV/TIFF alongside the inputs.

    close_after: close the 3D viewer and all images when done.
    NOTE(review): reads/writes the module-level dicts all_edt_histograms and
    all_skeleton_edt_histograms and the hist_min/hist_max/hist_step globals.
    """
    print( 'Processing: ' + directory )
    original_image_filename = directory + '/uniform.tif'
    probabilities_image_filename = directory + '/probabilities.tif'
    cube_rois_filename = directory + '/CubeROIs.csv'
    cubes = []
    f = open(cube_rois_filename)
    lines = f.readlines()
    # Skip the CSV header; each row is idx,class,x1,y1,z1,x2,y2,z2.
    for line in lines[1:]:
        #els = [ int(el)-1 for el in line.strip().split(',') ]
        els = [ int(el) for el in line.strip().split(',') ]
        # min/max normalise the two corners so (x1,y1,z1) <= (x2,y2,z2).
        cubes += [ { 'idx': els[0], 'class': els[1], 'x1':min(els[2],els[5]), 'y1':min(els[3],els[6]), 'z1':min(els[4],els[7]), 'x2':max(els[5],els[2]), 'y2':max(els[6],els[3]), 'z2':max(els[4],els[7]) } ]
    f.close()
    print( 'Read ' + str(len(cubes)) + ' cubes from file' )
    #for cube in cubes:
    #for cube_idx in range( len( cubes ) ):
    # NOTE(review): only the first cube is processed; the commented loops
    # above suggest full iteration was intended at some point.
    for cube_idx in [0]:
        cube = cubes[cube_idx]
        print( cube )
        all_edt_histograms['name'] += [ directory.split( '/' )[-1] + '_' + str(cube_idx) ]
        all_skeleton_edt_histograms['name'] += [ directory.split( '/' )[-1] + '_' + str(cube_idx) ]
        cube_basename = directory + '/cube_' + str(cube_idx) + '_'
        # Check if we have a parameters file, and evaluate it if so,
        # otherwise make one with defaults
        param_filename = cube_basename + 'parameters.py'
        # Make a default to start with
        no_params_set = False
        if not os.path.isfile( param_filename ):
            f = open( param_filename, 'w' )
            f.write( 'mesh_thresh=200\n' )
            f.write( 'interior_coords=[ (1,1,1) ]\n' )
            f.close()
            no_params_set = True
        if os.path.isfile( param_filename ):
            f = open( param_filename )
            lines = f.readlines()
            # NOTE(review): eval() of file contents — trusted local files only.
            for line in lines:
                if 'mesh_thresh' in line:
                    mesh_thresh = eval(line.split('=')[1])
                if 'interior_coords' in line:
                    interior_coords = eval(line.split('=')[1])
        load_uniform = True
        if load_uniform:
            # open uniform
            uniform = IJ.openImage( original_image_filename )
            # crop uniform
            uniform.setRoi( cube['x1'], cube['y1'], cube['x2'], cube['y2'] )
            IJ.run( uniform, "Crop", "")
            IJ.run( uniform, "Make Substack...", "slices=" + str(cube['z1']) + '-' + str(cube['z2']) )
            # Make Substack opens a new window; drop the old one and grab it.
            uniform.close()
            uniform = IJ.getImage()
            IJ.saveAsTiff( uniform, cube_basename + 'raw.tif' )
            # save max_projection of uniform
            IJ.run(uniform, "Z Project...", "projection=[Max Intensity]")
            uniform_mp = IJ.getImage()
            IJ.run(uniform_mp, "Enhance Contrast", "saturated=0.35")
            IJ.saveAsTiff( uniform_mp, cube_basename + 'raw_maxproj.tif' )
            # open probabilites
            probabilities = IJ.openImage( probabilities_image_filename )
            # crop probabilites
            probabilities.setRoi( cube['x1'], cube['y1'], cube['x2'], cube['y2'] )
            IJ.run( probabilities, "Crop", "")
            IJ.run( probabilities, "Make Substack...", "slices=" + str(cube['z1']) + '-' + str(cube['z2']) )
            # NOTE(review): sleeps appear to work around window-focus races
            # in the macro-style IJ.getImage() calls — confirm still needed.
            time.sleep( 5 )
            probabilities.close()
            probabilities = IJ.getImage()
            #IJ.saveAsTiff( probabilities, cube_basename + 'probabilities.tif' )
            time.sleep( 5 )
            # save max_projection of probabilites
            IJ.run(probabilities, "Z Project...", "projection=[Max Intensity]")
            probabilities_mp = IJ.getImage()
            IJ.run(probabilities_mp, "Enhance Contrast", "saturated=0.35")
            IJ.saveAsTiff( probabilities_mp, cube_basename + 'probabilities_maxproj.tif' )
            time.sleep( 5 )
            # blur probabilities to help smooth the isosurfaces
            blur_radius = 2
            IJ.run( probabilities, "Gaussian Blur 3D...", 'x=' + str(blur_radius) + ' y=' + str(blur_radius) + ' z=' + str(blur_radius) )
            # threshold probabilities
            #IJ.run(probabilities, "Enhance Contrast", "saturated=0.35")
            IJ.run( probabilities, "8-bit", "")
            #IJ.saveAsTiff( probabilities, cube_basename + 'probabilities.tif' )
            #for k in range( probabilities.getNSlices() ):
            #    probabilities.setSlice(k+1)
            #    probabilities.getProcessor().setThreshold( mesh_thresh - 1, mesh_thresh + 1, 0 )
            #IJ.run( probabilities, 'Convert to Mask', 'method=Default background=Dark black')
            #time.sleep( 5 )
            IJ.saveAsTiff( probabilities, cube_basename + 'probabilities.tif' )
            time.sleep( 5 )
            # mesh binary_probabilities
            #IJ.run( "3D Viewer", "")
            threedType = 2
            threedName = 'cube_' + str(cube_idx)
            IJ.runPlugIn( "ij3d.ImageJ3DViewer", probabilities.getTitle() )
            univ = Image3DUniverse.universes.get(0)
            # Add the thresholded surface to the 3D viewer and export it.
            univ.addContent( probabilities, Color3f(1,1,1), threedName, mesh_thresh, [True, False, False], 2, threedType )
            ImageJ3DViewer.select( threedName )
            ImageJ3DViewer.exportContent( 'wavefront', cube_basename + 'mesh.obj' )
            # smooth mesh
            c = univ.getSelected()
            n = c.getContent()
            ctm = n.getMesh()
            fim = customnode.FullInfoMesh( ctm.getMesh() )
            ec = customnode.EdgeContraction( fim, False )
            initial_num_verts = ec.getVertexCount()
            # Decimate 10 % of the vertices (only if smoothing is enabled).
            num_to_remove = int( initial_num_verts * 0.1 )
            #v = InteractiveMeshDecimation.simplify( ec, num_to_remove )
            enable_smoothing = False
            if enable_smoothing:
                part = num_to_remove / 10
                last = num_to_remove % 10
                ret = 0
                for i in range(10):
                    IJ.showProgress(i + 1, 10)
                    ret = ec.removeNext(part)
                if (last != 0):
                    ret = ec.removeNext(last)
                IJ.showProgress(1)
                ctm.setMesh( fim.getMesh() )
                ImageJ3DViewer.exportContent( 'wavefront', cube_basename + 'smooth_mesh.obj' )
            # 3d viewer screenshot
            screenshot_3d = univ.takeSnapshot()
            IJ.saveAsTiff( screenshot_3d, cube_basename + 'mesh_screenshot.tif' )
            # voxelize mesh
            voxelizer = InteractiveMeshVoxelization()
            voxelizer.voxelize( ctm, probabilities.getWidth(), probabilities.getHeight(), probabilities.getStackSize() )
            #voxelization = WindowManager.getImage( ctm.getName() + '_voxelization' )
            voxelization = WindowManager.getImage( 'null_voxelization' )
            voxelization.setCalibration( probabilities.getCalibration().copy() )
            # manually 3D fill vasculature, fill with thresholdable-color
            # save selected vasculature
            #call('process3d.Flood_Fill.fill', x,y,z)
            if not no_params_set:
                fill_color = 100
                Flood_Fill.fill( voxelization, interior_coords[0][0], interior_coords[0][1], interior_coords[0][2], fill_color )
                # Threshold to extract
                #IJ.setAutoThreshold( voxelization, 'Default dark' )
                #Prefs.blackBackground = true
                #IJ.run( voxelization, 'Convert to Mask', 'method=Default background=Dark black')
                for k in range( voxelization.getNSlices() ):
                    voxelization.setSlice(k+1)
                    voxelization.getProcessor().setThreshold( fill_color - 1, fill_color + 1, 0 )
                IJ.run( voxelization, 'Convert to Mask', 'method=Default background=Dark black')
                IJ.saveAsTiff( voxelization, cube_basename + 'voxelization.tif' )
                # Calculate and record volume HERE
                IJ.run(voxelization, "Z Project...", "projection=[Max Intensity]")
                voxelization_mp = IJ.getImage()
                IJ.run(voxelization_mp, "Enhance Contrast", "saturated=0.35")
                IJ.saveAsTiff( voxelization_mp, cube_basename + 'voxelization_maxproj.tif' )
                # Calculate EDT
                IJ.run( voxelization, "Exact Euclidean Distance Transform (3D)", "" )
                edt = WindowManager.getImage( 'EDT' )
                IJ.run( edt, 'Fire', '' )
                # Get the histogram data in an array
                hist_nBins = int( ( hist_max - hist_min ) / hist_step )
                #edt_stats = edt.getStatistics( Measurements.MEDIAN, hist_nBins, hist_min, hist_max )
                edt_stats = StackStatistics( edt, hist_nBins, hist_min, hist_max )
                hist_data = edt_stats.getHistogram()
                hist_binLabels = [ ( hist_min + hist_step * el ) for el in range( hist_nBins ) ]
                max_radius = 20
                IJ.run( edt, 'Histogram', 'bins=' + str(hist_nBins) + ' x_min=' + str(hist_min) + ' x_max=' + str(hist_max) + ' y_max=Auto stack' )
                edt_histogram = WindowManager.getImage( 'Histogram of EDT' )
                IJ.saveAsTiff( edt_histogram, cube_basename + 'edt_histogram.tif' )
                # Convert voxel counts to physical volume via the calibration.
                x_unit_scale = uniform.getCalibration().getX(1)
                y_unit_scale = uniform.getCalibration().getY(1)
                z_unit_scale = uniform.getCalibration().getZ(1)
                f = open( cube_basename + 'edt_histogram.csv', 'w' )
                for k in range( len( hist_data ) ):
                    f.write( str(hist_binLabels[k]) + '\t' + str(hist_data[k]*x_unit_scale*y_unit_scale*z_unit_scale) + '\n' )
                    #f.write( str(hist_binLabels[k]) + '\t' + str(hist_data[k]) + '\n' )
                    all_edt_histograms[hist_binLabels[k]] += [ hist_data[k] ]
                f.close()
                # Handling skeletons
                IJ.run( voxelization, "Skeletonize (2D/3D)", "")
                skeleton = voxelization # For simplicity later, but note that voxelization has been mutated
                IJ.run(skeleton, "32-bit", "")
                # Scale mask values 0/255 down to 0/1 (1/255 = 0.003921...).
                IJ.run(skeleton, "Calculator Plus", 'i1=' + str(skeleton.getTitle()) + ' i2=' + str(skeleton.getTitle()) + ' operation=[Scale: i2 = i1 x k1 + k2] k1=0.003921568627 k2=0' )
                IJ.run(skeleton, "Z Project...", "projection=[Max Intensity]")
                skeleton_mp = IJ.getImage()
                IJ.run(skeleton_mp, "Enhance Contrast", "saturated=0.35")
                IJ.saveAsTiff( skeleton_mp, cube_basename + 'skeleton_maxproj.tif' )
                ic = ImageCalculator()
                # Keep EDT values only along the skeleton (radius samples).
                skeleton_edt = ic.run("Multiply 32-bit stack", edt, skeleton)
                IJ.run( skeleton_edt, 'Fire', '' )
                # Get the histogram data in an array
                hist_nBins = int( ( hist_max - hist_min ) / hist_step )
                #skeleton_edt_stats = edt.getStatistics( Measurements.MEDIAN, hist_nBins, hist_min, hist_max )
                # NOTE(review): statistics are computed on `edt`, not on
                # `skeleton_edt` — looks like a copy-paste slip; confirm.
                skeleton_edt_stats = StackStatistics( edt, hist_nBins, hist_min, hist_max )
                hist_data = skeleton_edt_stats.getHistogram()
                hist_binLabels = [ ( hist_min + hist_step * el ) for el in range( hist_nBins ) ]
                IJ.run( skeleton_edt, 'Histogram', 'bins=' + str(hist_nBins) + ' x_min=' + str(hist_min) + ' x_max=' + str(hist_max) + ' y_max=Auto stack' )
                skeleton_edt_histogram = WindowManager.getImage( 'Histogram of EDT' )
                IJ.saveAsTiff( skeleton_edt_histogram, cube_basename + 'skeleton_edt_histogram.tif' )
                unit_scale = uniform.getCalibration().getX(1)
                f = open( cube_basename + 'skeleton_edt_histogram.csv', 'w' )
                for k in range( len( hist_data ) ):
                    f.write( str(hist_binLabels[k]) + '\t' + str(hist_data[k] * unit_scale) + '\n' )
                    all_skeleton_edt_histograms[hist_binLabels[k]] += [ hist_data[k] * unit_scale ]
                f.close()
            else:
                # No interior coordinates yet: save the raw voxelization only.
                IJ.saveAsTiff( voxelization, cube_basename + 'voxelization.tif' )
    # Free up resources
    if close_after:
        ImageJ3DViewer.close()
        IJ.run( 'Close All', '' )
        IJ.freeMemory()
def regBf(fn=None, imp=None, refId=None):
    """
    Register a time series stack to a specified reference slice,
    from a file (imported by BioFormat) or a stack ImagePlus.
    Returns a registered ImagePlus.
    The stack must have only 1 z layer.
    refId is in the format of [int channel, int slice, int frame]
    If no refId is supplied, will use the first slice [1,1,1]

    Note: since TurboReg is used for registeration, there will be
    temporary opened image windows.
    """

    ## Prepare the right ImagePlus
    # Exactly one of fn / imp must be given; prints and returns None otherwise.
    if imp is None:
        if fn is None:
            od = OpenDialog("Choose a file", None)
            filename = od.getFileName()
            if filename is None:
                print "User canceled the dialog!"
                return
            else:
                directory = od.getDirectory()
                filepath = directory + filename
                print "Selected file path:", filepath
        else:
            if os.path.exists(fn) and os.path.isfile(fn):
                filepath = fn
            else:
                print "File does not exist!"
                return
        imps = BF.openImagePlus(filepath)
        imp = imps[0]
        if imp is None:
            print "Cannot load file!"
            return
    else:
        if fn is not None:
            print "File or ImagePlus? Cannot load both."
            return

    width = imp.getWidth()
    height = imp.getHeight()
    # C
    nChannels = imp.getNChannels()
    # Z
    nSlices = imp.getNSlices()
    # T
    nFrames = imp.getNFrames()
    # pixel size
    calibration = imp.getCalibration()

    # Only supoort one z layer
    if nSlices != 1:
        print "Only support 1 slice at Z dimension."
        return

    # set registration reference slice
    if refId is None:
        refC = 1
        refZ = 1
        refT = 1
    else:
        refC = refId[0]
        refZ = refId[1]
        refT = refId[2]
    if (refC not in range(1, nChannels+1) or
            refZ not in range(1, nSlices+1) or
            refT not in range(1, nFrames+1)):
        print "Invalid reference image!"
        return

    stack = imp.getImageStack()
    # pre-sized stack; slices are filled by index via setProcessor below
    registeredStack = ImageStack(width, height, nChannels*nFrames*nSlices)

    # setup windows, these are needed by TurboReg
    # (TurboReg addresses its inputs by window title, so the three images
    #  must be shown in actual ImageWindows)
    tmpip = FloatProcessor(width, height)
    refWin = ImageWindow(ImagePlus("ref", tmpip))
    bounds = refWin.getBounds()
    # refWin.setVisible(False)
    toRegWin = ImageWindow(ImagePlus("toReg", tmpip))
    toRegWin.setLocation(bounds.width + bounds.x, bounds.y)
    # toRegWin.setVisible(False)
    toTransformWin = ImageWindow(ImagePlus("toTransform", tmpip))
    toTransformWin.setLocation(2 * bounds.width + bounds.x, bounds.y)
    # toTransformWin.setVisible(False)

    # get reference image
    refImp = ImagePlus("ref", stack.getProcessor(imp.getStackIndex(refC, refZ, refT)))
    refWin.setImage(refImp)

    tr = TurboReg_()

    for t in xrange(1, nFrames+1):
        IJ.showProgress(t-1, nFrames)

        # print "t ", t
        # do TurboReg on reference channel
        toRegId = imp.getStackIndex(refC, refZ, t)
        toRegImp = ImagePlus("toReg", stack.getProcessor(toRegId))
        toRegWin.setImage(toRegImp)

        # -align: estimate a rigid-body transform between the current frame
        # (full image) and the reference, using three landmark pairs
        regArg = "-align " +\
                 "-window " + toRegImp.getTitle() + " " +\
                 "0 0 " + str(width - 1) + " " + str(height - 1) + " " +\
                 "-window " + refImp.getTitle() + " " +\
                 "0 0 " + str(width - 1) + " " + str(height - 1) + " " +\
                 "-rigidBody " +\
                 str(width / 2) + " " + str(height / 2) + " " +\
                 str(width / 2) + " " + str(height / 2) + " " +\
                 "0 " + str(height / 2) + " " +\
                 "0 " + str(height / 2) + " " +\
                 str(width - 1) + " " + str(height / 2) + " " +\
                 str(width - 1) + " " + str(height / 2) + " " +\
                 "-hideOutput"

        tr = TurboReg_()
        tr.run(regArg)
        registeredImp = tr.getTransformedImage()
        sourcePoints = tr.getSourcePoints()
        targetPoints = tr.getTargetPoints()

        registeredStack.setProcessor(registeredImp.getProcessor(), toRegId)
        # toRegImp.flush()

        # apply transformation on other channels
        for c in xrange(1, nChannels+1):
            # print "c ", c
            if c == refC:
                continue
            toTransformId = imp.getStackIndex(c, 1, t)
            toTransformImp = ImagePlus("toTransform", stack.getProcessor(toTransformId))
            toTransformWin.setImage(toTransformImp)

            # -transform: re-apply the landmark pairs recovered from the
            # reference-channel alignment to this channel
            transformArg = "-transform " +\
                           "-window " + toTransformImp.getTitle() + " " +\
                           str(width) + " " + str(height) + " " +\
                           "-rigidBody " +\
                           str(sourcePoints[0][0]) + " " +\
                           str(sourcePoints[0][1]) + " " +\
                           str(targetPoints[0][0]) + " " +\
                           str(targetPoints[0][1]) + " " +\
                           str(sourcePoints[1][0]) + " " +\
                           str(sourcePoints[1][1]) + " " +\
                           str(targetPoints[1][0]) + " " +\
                           str(targetPoints[1][1]) + " " +\
                           str(sourcePoints[2][0]) + " " +\
                           str(sourcePoints[2][1]) + " " +\
                           str(targetPoints[2][0]) + " " +\
                           str(targetPoints[2][1]) + " " +\
                           "-hideOutput"

            tr = TurboReg_()
            tr.run(transformArg)
            registeredStack.setProcessor(tr.getTransformedImage().getProcessor(), toTransformId)
            # toTransformImp.flush()

        sourcePoints = None
        targetPoints = None
        IJ.showProgress(t, nFrames)
        IJ.showStatus("Frames registered: " + str(t) + "/" + str(nFrames))

    # tear down the temporary TurboReg windows
    refWin.close()
    toRegWin.close()
    toTransformWin.close()

    imp2 = ImagePlus("reg_"+imp.getTitle(), registeredStack)
    imp2.setCalibration(imp.getCalibration().copy())
    imp2.setDimensions(nChannels, nSlices, nFrames)
    # print "type ", imp.getType()
    # print "type ", imp2.getType()
    # print nChannels, " ", nSlices, " ", nFrames
    # print registeredStack.getSize()
    # carry over all string properties of the source image
    for key in imp.getProperties().stringPropertyNames():
        imp2.setProperty(key, imp.getProperty(key))
    # comp = CompositeImage(imp2, CompositeImage.COLOR)
    # comp.show()
    # imp2 = imp.clone()
    # imp2.setStack(registeredStack)
    # imp2.setTitle("reg"+imp.getTitle())
    # imp2.show()
    # imp.show()
    return imp2
#for cell in listfilescells : f1 = open(rootdir+now+"-R1-MT.txt", "w") tab="\t" f1.write("cell"+tab+"maxFrames"+tab+"maxcumul"+tab+"nrevs"+"\n") for cle in listcellname : rm.runCommand("reset") #cle = cell.rsplit("/", 1)[1][:-len(".cell")] #cles.append(cle) rm.runCommand("Open", dictRois[cle]) rm.runCommand("Show None") RawroisArray=rm.getRoisAsArray() if len(RawroisArray)< minLife : continue roisArray=[RawroisArray[i] for i in range(0,len(RawroisArray), subs)] IJ.showStatus(cle) IJ.showProgress(listcellname.index(cle), len(listcellname)) dxA=[] dyA=[] dxB=[] dyB=[] dA=[] dB=[] sensA = 1 sensB = -1 nrev = 0 color=colors.pop(0) colorA=color.brighter() colorB=color.darker()
# Script main body #============================================================================== options = getOptions() if options is not None: width = options.width height = options.height x0 = width / 2.0 sigma_x = width * options.sigma_x y0 = height / 2.0 sigma_y = height * options.sigma_y pixels = zeros('f', width * height) pixels_index = MapPositionToIndex(width) val_sum = 0 for y in range(height): IJ.showProgress(y, height-1) for x in range(width): val = Gaussian2D(x0, y0, x, y, sigma_x, sigma_y) val_sum += val pixels[pixels_index.at(x, y)] = val for i in range(len(pixels)): IJ.showProgress(i, len(pixels) - 1) pixels[i] = pixels[i] / val pixels_fp = FloatProcessor(width, height, pixels, None) imp = ImagePlus('2D Gaussian', pixels_fp) imp.show()
def main(imp, tolerance, window_radius, pixel_size_nm, out_dir): # set output dir out_dir = str(out_dir) # Find maxima excludeOnEdge = True polygon = MaximumFinder().getMaxima(imp.getProcessor(), tolerance, excludeOnEdge) roi = PointRoi(polygon) # get RoiManager rm = RoiManager.getInstance(); if rm is None: rm = RoiManager() # Check if output table is writable out_table_fn = os.path.join(out_dir, "fwhm_values.txt") try: file_handle = open(out_table_fn, 'w') except IOError: IJ.showMessage("Output file '' not writeable. Check if file is open in Excel...") sys.exit(0) # iterate and write output with file_handle as csvfile: writer = csv.DictWriter(csvfile, fieldnames=["id", "peak_id", "x_pos", "y_pos", "type", "fwhm", "fwhm_nm", "r2_GoF", "avg_fwhm", "area_profile", "area_gauss"], delimiter="\t", lineterminator='\n') writer.writeheader() id_ = 0 # over all peaks for i, p in list(enumerate(roi)): IJ.showProgress(i, roi.getNCounters() +1) # Horizontal lroi = Line(p.x+0.5-window_radius, p.y+0.5, p.x+0.5+window_radius, p.y+0.5) output = fit_gauss(lroi, imp, p, i, id_, "H", rm) writer.writerow(output) id_+=1 # Vertical lroi = Line(p.x+0.5, p.y+0.5-window_radius, p.x+0.5, p.y+0.5+window_radius) output = fit_gauss(lroi, imp, p, i, id_, "V", rm) writer.writerow(output) id_+=1 # Diagonal 1 lroi = Line(p.x+0.5-MN*window_radius, p.y+0.5+MN*window_radius, p.x+0.5+MN*window_radius, p.y+0.5-MN*window_radius) output = fit_gauss(lroi, imp, p, i, id_, "D1", rm) writer.writerow(output) id_+=1 # Diagonal 2 lroi = Line(p.x+0.5-MN*window_radius, p.y+0.5-MN*window_radius, p.x+0.5+MN*window_radius, p.y+0.5+MN*window_radius) output = fit_gauss(lroi, imp, p, i, id_, "D2", rm) writer.writerow(output) id_+=1 IJ.showProgress(1) rm.runCommand("Deselect"); # deselect ROIs to save them all rm.runCommand("Save", os.path.join(out_dir, "fwhm_fiji_rois.zip")) IJ.showMessage("FWHM on Spots: Done")
def main_interactive():
    """The main routine for running interactively.

    Asks the user for a FluoView project file, parses its mosaics while
    updating the ImageJ progress bar, presents export options in a dialog,
    generates the stitching macro plus tile configuration files, and runs
    the macro if the dialog was confirmed with [OK].
    """
    log.info('Running in interactive mode.')
    (base, fname) = ui_get_input_file()
    if (base is None):
        return
    log.warn("Parsing project file: %s" % (base + fname))
    IJ.showStatus("Parsing experiment file...")
    mosaics = fv.FluoViewMosaic(join(base, fname), runparser=False)
    IJ.showStatus("Parsing mosaics...")
    progress = 0.0
    count = len(mosaics.mosaictrees)
    # Bug fix: guard against an empty project — 1.0 / 0 raised a
    # ZeroDivisionError before the "no mosaics" dialog below was reached.
    step = 1.0 / count if count else 0.0
    for subtree in mosaics.mosaictrees:
        IJ.showProgress(progress)
        mosaics.add_mosaic(subtree)
        progress += step
    IJ.showProgress(progress)
    IJ.showStatus("Parsed %i mosaics." % len(mosaics))
    dialog = GenericDialog('FluoView OIF / OIB Stitcher')
    if len(mosaics) == 0:
        msg = ("Couldn't find any (valid) mosaics in the project file.\n"
               " \n"
               "Please make sure to have all files available!\n"
               " \n"
               "Will stop now.\n")
        log.warn(msg)
        dialog.addMessage(msg)
        dialog.showDialog()
        return
    msg = "------------------------ EXPORT OPTIONS ------------------------"
    dialog.addMessage(msg)
    formats = ["OME-TIFF", "ICS/IDS"]
    dialog.addChoice("Export Format", formats, formats[0])
    dialog.addCheckbox("separate files by Z slices (OME-TIFF only)?", False)
    msg = "------------------------ EXPORT OPTIONS ------------------------"
    dialog.addMessage(msg)
    dialog.addMessage("")
    dialog.addMessage("")
    msg = gen_mosaic_details(mosaics)
    log.warn(msg)
    msg += "\n \nPress [OK] to write tile configuration files\n"
    msg += "and continue with running the stitcher."
    dialog.addMessage(msg)
    dialog.showDialog()
    # read the dialog values in the same order they were added above
    opts = {}
    if dialog.getNextChoice() == 'ICS/IDS':
        opts['export_format'] = '".ids"'
    else:
        opts['export_format'] = '".ome.tif"'
    if dialog.getNextBoolean() == True:
        opts['split_z_slices'] = 'true'
    code = imagej.gen_stitching_macro_code(mosaics, 'templates/stitching',
                                           path=base, tplpath=imcftpl, opts=opts)
    log.warn("============= generated macro code =============")
    log.warn(flatten(code))
    log.warn("============= end of generated macro code =============")
    if dialog.wasOKed():
        log.warn('Writing stitching macro.')
        imagej.write_stitching_macro(code, fname='stitch_all.ijm', dname=base)
        log.warn('Writing tile configuration files.')
        imagej.write_all_tile_configs(mosaics, fixsep=True)
        log.warn('Launching stitching macro.')
        IJ.runMacro(flatten(code))
for layer in layers: vector_data.addAll( cast_collection(layer.getDisplayables(VectorData, False, True), VectorData, True)) vector_data.addAll( cast_collection(layerset.getZDisplayables(VectorData, True), VectorData, True)) # Propagate before or propagate after # TO DO # Apply transforms to patches progress = 0 for mesh, layer in zip(meshes, layers): Utils.log("Applying transforms to patches...") IJ.showProgress(0, len(layers)) mlt = MovingLeastSquaresTransform2() mlt.setModel(AffineModel2D) mlt.setAlpha(2.0) mlt.setMatches(mesh.getVA().keySet()) # ElasticLayerAlignment uses newer concurrent methods for this pool = Executors.newFixedThreadPool(MAX_NUM_THREADS) patch_transforms = [] patches = layer.getDisplayables(Patch) for patch in patches: pt = PatchTransform(patch, mlt.copy()) patch_transforms.append(pt) futures = pool.invokeAll(patch_transforms) for future in futures:
A speed test example fixed Modifications Date Who Ver What ---------- --- ------ ------------------------------------------------- 2017-04-02 JRM 0.1.00 Initial test with 4096. Original example did not import functions. 11.594sec on jrmFastMac """ from org.python.core import codecs codecs.setDefaultEncoding('utf-8') from java.lang import System from ij import IJ, ImagePlus from ij.process import FloatProcessor from math import sqrt t0 = System.currentTimeMillis() size = 4096 ip = FloatProcessor(size,size) for y in range(size): IJ.showProgress(y,size-1) for x in range(size): dx=x-size/2; dy=y-size/2 d = sqrt(dx*dx+dy*dy) ip.setf(x,y,-d) time = str((System.currentTimeMillis()-t0)/1000.0)+" seconds" ImagePlus(time,ip).show() IJ.run("Red/Green");
def show_progress(cur, final): """Wrapper to update the progress bar and issue a log message.""" # ij.IJ.showProgress is adding 1 to the value given as first parameter... log.info("Progress: %s / %s (%s)", cur + 1, final, (1.0 + cur) / final) IJ.showProgress(cur, final)
print basic_info ## Outputs each stitched z plane as a separate file iReader = ImageReader() iReader.setId(parentLSMFilePath) for z in range(max_coords[2]+basic_info[4]): ## for z in range(50,51): IJ.showStatus("z: "+str(z+1)+" of "+str(max_coords[2]+basic_info[4])) chIps = [] resImages = [] for ch in range(basic_info[0]): chIps.append(ByteProcessor(max_coords[0]+scale_info[2],max_coords[1]+scale_info[2])) for ch in range(basic_info[0]): resImages.append(ImagePlus("ch"+str(ch+1),chIps[ch])) for se in range(basic_info[1]): IJ.showProgress(se,basic_info[1]) if z >= coords_upscaled[se][2] and z <= coords_upscaled[se][2]+basic_info[4]-1: iReader.setSeries(se) for ch in range(basic_info[0]): byteArray = iReader.openBytes((z-coords_upscaled[se][2])*basic_info[0]+ch) testIp = ByteProcessor(scale_info[2],scale_info[2],byteArray) testImage = ImagePlus("tester",testIp) Image_stamper.stampStack(testImage,resImages[ch],coords_upscaled[se][0],coords_upscaled[se][1],0) activeIp = chIps[ch] testImage.close() for ch in range(len(resImages)): IJ.saveAsTiff(resImages[ch],parentLSMFilePath+"_tiles/v_img/img_z_"+str(z+1)+"_c_"+str(ch+1)+".tif") #outPlaneImage = RGBStackMerge.mergeChannels(resImages,False) #IJ.saveAsTiff(outPlaneImage,parentLSMFilePath+"_tiles/v_img/img_z_"+str(z+1)+".tif")
def processMovie(root, files, outfile):
    """Concatenate single-time-point images and write an ome.tiff file.

    Parameters:
        root: unused here, kept for interface compatibility with callers.
        files: list of input file paths; sorted in place before processing.
        outfile: destination path of the concatenated ome.tiff.

    Raises:
        RuntimeError: if fewer than two files are given, or an input
            already contains multiple time points.
    """
    # Bug fix: fail fast — the original only raised this after all the
    # metadata work was already done.
    if len(files) <= 1:
        raise RuntimeError('Found only one file. Nothing to concatenate')

    files.sort()

    # Reject inputs that already are a time series.
    options = ImporterOptions()
    options.setId(files[0])
    options.setVirtual(1)
    image = BF.openImagePlus(options)
    image = image[0]
    if image.getNFrames() > 1:
        msg = ("%s Contains multiple time points. Can only concatenate"
               " single time points!" %files[0])
        raise RuntimeError(msg)
    image.close()

    # Use the first file as the template for the output OME-XML store.
    reader = ImageReader()
    reader.setMetadataStore(MetadataTools.createOMEXMLMetadata())
    reader.setId(files[0])
    timeInfo = []
    omeOut = reader.getMetadataStore()
    omeOut = setUpXml(omeOut, image, files)
    reader.close()

    # Copy the per-plane metadata of every input into the output store,
    # shifting plane indices by itime*nrImages for file number itime.
    # (enumerate replaces the original O(n^2) files.index() lookups, which
    # also broke on duplicate file names.)
    for itime, fileName in enumerate(files):
        omeMeta = MetadataTools.createOMEXMLMetadata()
        reader.setMetadataStore(omeMeta)
        reader.setId(fileName)
        timeInfo.append(getTimePoint(reader, omeMeta))
        nrImages = reader.getImageCount()
        # Time offset of this file relative to the first one; constant for
        # all its planes, so compute it once instead of per plane.
        try:
            dT = round(timeInfo[itime]-timeInfo[0],2)
        except TypeError:
            # time points are datetime-like: difference is a timedelta
            dT = (timeInfo[itime]-timeInfo[0]).seconds
        for i in range(0, nrImages):
            omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages)
            omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages)
            omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheT(omeOut.getPlaneTheT(0,i), 0, i + itime*nrImages)
            omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages)
        reader.close()
        IJ.showProgress(itime, len(files))

    # Average time increment across the whole series; dT may be a
    # timedelta that cannot be converted with float().
    try:
        omeOut.setPixelsTimeIncrement(float(dT/(len(files)-1)), 0)
    except (TypeError, ZeroDivisionError):
        omeOut.setPixelsTimeIncrement(0, 0)

    # Concatenate the pixel data and patch the merged OME-XML back into
    # the TIFF comment of the result.
    outfile = concatenateImagePlus(files, outfile)
    filein = RandomAccessInputStream(outfile)
    fileout = RandomAccessOutputStream(outfile)
    saver = TiffSaver(fileout, outfile)
    saver.overwriteComment(filein, omeOut.dumpXML())
    fileout.close()
    filein.close()
def register_hyperstack_subpixel(imp, channel, shifts, target_folder, virtual):
    """ Takes the imp, determines the x,y,z drift for each pair of time points,
    using the preferred given channel, and outputs as a hyperstack.
    The shifted image is computed using TransformJ allowing for sub-pixel shifts
    using interpolation. This is quite a bit slower than just shifting the image
    by full pixels as done in above function register_hyperstack(). However it
    significantly improves the result by removing pixel jitter.

    shifts is a list of Point3f, one per frame; target_folder receives the
    per-slice TIFFs when virtual is True.
    """
    # Compute bounds of the new volume,
    # which accounts for all translations:
    minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
    # Make shifts relative to new canvas dimensions
    # so that the min values become 0,0,0
    for shift in shifts:
        shift.x -= minx
        shift.y -= miny
        shift.z -= minz
    # new canvas dimensions:
    width = int(imp.width + maxx - minx)
    height = int(maxy - miny + imp.height)
    slices = int(maxz - minz + imp.getNSlices())
    print "New dimensions:", width, height, slices
    # prepare stack for final results
    stack = imp.getStack()
    if virtual is True:
        names = []  # slice file names, later fed into a VirtualStack
    else:
        registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())
    # prepare empty slice for padding
    empty = imp.getProcessor().createProcessor(width, height)
    IJ.showProgress(0)
    for frame in range(1, imp.getNFrames()+1):
        IJ.showProgress(frame / float(imp.getNFrames()+1))
        fr = "t" + zero_pad(frame, len(str(imp.getNFrames()))) # for saving files in a virtual stack
        # init
        shift = shifts[frame-1]
        tmpstack = ImageStack(width, height, imp.getProcessor().getColorModel())
        print "frame",frame,"correcting drift",-shift.x-minx,-shift.y-miny,-shift.z-minz
        IJ.log(" frame "+str(frame)+" correcting drift "+str(round(-shift.x-minx,2))+","+str(round(-shift.y-miny,2))+","+str(round(-shift.z-minz,2)))
        # for doing the same with imglib2 i would have to put the channel loop
        # to the outside and translate each individual channel as long as i don't figure out
        # to two wrap a composite imglib2 image into an imp
        # Add all slices of this frame (padded to the new canvas size)
        stack = imp.getStack()
        for s in range(1, imp.getNSlices()+1):
            for ch in range(1, imp.getNChannels()+1):
                ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
                ip2 = ip.createProcessor(width, height) # potentially larger
                ip2.insert(ip, 0, 0)
                tmpstack.addSlice("", ip2)
        # Pad the end (in z) of this frame
        for s in range(imp.getNSlices(), slices):
            for ch in range(1, imp.getNChannels()+1):
                tmpstack.addSlice("", empty)
        # Set correct dimensions of this frame
        # ..it is important *not* to set the calibration as translation should be in pixels units
        imp_tmpstack = ImagePlus("registered time points", tmpstack)
        imp_tmpstack.setProperty("Info", imp.getProperty("Info"))
        imp_tmpstack.setDimensions(imp.getNChannels(), slices, 1)
        imp_tmpstack.setOpenAsHyperStack(True)
        # subpixel translation
        imp_translated = translate_using_imagescience(imp_tmpstack, shift.x, shift.y, shift.z)
        #imp_translated = translate_using_imglib2(imp_tmpstack, shift.x, shift.y, shift.z)
        imp_translated.setProperty("Info", imp.getProperty("Info"))
        imp_translated.setDimensions(imp.getNChannels(), slices, 1)
        imp_translated.setOpenAsHyperStack(True)
        # Add translated frame to final time-series
        stack = imp_translated.getStack()
        for s in range(1, imp_translated.getNSlices()+1):
            ss = "_z" + zero_pad(s, len(str(slices)))
            for ch in range(1, imp_translated.getNChannels()+1):
                ip = stack.getProcessor(imp_translated.getStackIndex(ch, s, 1))
                if virtual is True:
                    # write each slice to disk; the virtual stack references it by name
                    name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
                    names.append(name)
                    currentslice = ImagePlus("", ip)
                    currentslice.setCalibration(imp.getCalibration().copy())
                    currentslice.setProperty("Info", imp.getProperty("Info"));
                    FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
                else:
                    registeredstack.addSlice("", ip)
    IJ.showProgress(1)
    if virtual is True:
        # Create virtual hyper stack with the result
        registeredstack = VirtualStack(width, height, None, target_folder)
        for name in names:
            registeredstack.addSlice(name)
        registeredstack_imp = ImagePlus("registered time points", registeredstack)
        registeredstack_imp.setDimensions(imp.getNChannels(), slices, imp.getNFrames())
        registeredstack_imp.setCalibration(imp.getCalibration().copy())
        registeredstack_imp.setOpenAsHyperStack(True)
    else:
        registeredstack_imp = ImagePlus("registered time points", registeredstack)
        registeredstack_imp.setCalibration(imp.getCalibration().copy())
        registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
        registeredstack_imp.setDimensions(imp.getNChannels(), slices, imp.getNFrames())
        registeredstack_imp.setOpenAsHyperStack(True)
    if 1 == registeredstack_imp.getNChannels():
        return registeredstack_imp
    #IJ.log("\nHyperstack dimensions: time frames:" + str(registeredstack_imp.getNFrames()) + ", slices: " + str(registeredstack_imp.getNSlices()) + ", channels: " + str(registeredstack_imp.getNChannels()))
    # Else, as composite (preserve the display mode of a composite input)
    mode = CompositeImage.COLOR;
    if isinstance(imp, CompositeImage):
        mode = imp.getMode()
    else:
        return registeredstack_imp
    return CompositeImage(registeredstack_imp, mode)
def _update_progress(self): '''Raises the progress by one and updates the progress bar. ''' self.progress += 1 IJ.showProgress(self.progress, self.full_progress)
saveFilePath = saveFileDir + saveFileName savefilehandler = open(saveFilePath,"w") waitDialog = WaitForUserDialog("Use freeform tool to outline the piece of tissue") waitDialog.show() roi = theImage.getRoi() if (roi is not None): print type(roi) thePolygon = roi.getPolygon() boundRect = thePolygon.getBounds() for i in range(boundRect.x,boundRect.x+boundRect.width): pos_pixels = 0 tot_pixels = 0 IJ.showProgress(i-boundRect.x,boundRect.width) for j in range(boundRect.y,boundRect.y+boundRect.height): if thePolygon.contains(i,j): value = ip.getPixelValue(i,j) tot_pixels = tot_pixels + 1 if (value > 128): pos_pixels = pos_pixels + 1 if tot_pixels > 0: pos_fraction = pos_pixels / float(tot_pixels) else: pos_fraction = 0 str_out = str(i) + "," + str(pos_pixels) + "," + str(tot_pixels) + "," + str(pos_fraction) + "\n" savefilehandler.write(str_out) savefilehandler.close()
def register_hyperstack_subpixel(imp, channel, shifts, target_folder, virtual):
    """ Takes the imp, determines the x,y,z drift for each pair of time points,
    using the preferred given channel, and outputs as a hyperstack.
    The shifted image is computed using TransformJ allowing for sub-pixel shifts
    using interpolation. This is quite a bit slower than just shifting the image
    by full pixels as done in above function register_hyperstack(). However it
    significantly improves the result by removing pixel jitter.

    Unlike the sibling implementation, this variant translates one channel at
    a time (translate_single_stack_using_imglib2) and assembles the hyperstack
    via HyperStackConverter at the end.
    """
    # Compute bounds of the new volume,
    # which accounts for all translations:
    minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
    # Make shifts relative to new canvas dimensions
    # so that the min values become 0,0,0
    for shift in shifts:
        shift.x -= minx
        shift.y -= miny
        shift.z -= minz
    # new canvas dimensions:
    width = int(imp.width + maxx - minx)
    height = int(maxy - miny + imp.height)
    slices = int(maxz - minz + imp.getNSlices())
    print "New dimensions:", width, height, slices
    # prepare stack for final results
    stack = imp.getStack()
    if virtual is True:
        names = []  # slice file names, later fed into a VirtualStack
    else:
        registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())
    # prepare empty slice for padding
    empty = imp.getProcessor().createProcessor(width, height)
    IJ.showProgress(0)
    # get raw data as stack
    stack = imp.getStack()
    # loop across frames
    for frame in range(1, imp.getNFrames()+1):
        IJ.showProgress(frame / float(imp.getNFrames()+1))
        fr = "t" + zero_pad(frame, len(str(imp.getNFrames()))) # for saving files in a virtual stack
        # get and report current shift
        shift = shifts[frame-1]
        print "frame",frame,"correcting drift",-shift.x-minx,-shift.y-miny,-shift.z-minz
        IJ.log(" frame "+str(frame)+" correcting drift "+str(round(-shift.x-minx,2))+","+str(round(-shift.y-miny,2))+","+str(round(-shift.z-minz,2)))
        # loop across channels
        for ch in range(1, imp.getNChannels()+1):
            tmpstack = ImageStack(width, height, imp.getProcessor().getColorModel())
            # get all slices of this channel and frame (padded to new canvas size)
            for s in range(1, imp.getNSlices()+1):
                ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
                ip2 = ip.createProcessor(width, height) # potentially larger
                ip2.insert(ip, 0, 0)
                tmpstack.addSlice("", ip2)
            # Pad the end (in z) of this channel and frame
            for s in range(imp.getNSlices(), slices):
                tmpstack.addSlice("", empty)
            # subpixel translation
            imp_tmpstack = ImagePlus("", tmpstack)
            imp_translated = translate_single_stack_using_imglib2(imp_tmpstack, shift.x, shift.y, shift.z)
            # add translated stack to final time-series
            translated_stack = imp_translated.getStack()
            for s in range(1, translated_stack.getSize()+1):
                ss = "_z" + zero_pad(s, len(str(slices)))
                ip = translated_stack.getProcessor(s).duplicate() # duplicate is important as otherwise it will only be a reference that can change its content
                if virtual is True:
                    name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
                    names.append(name)
                    currentslice = ImagePlus("", ip)
                    currentslice.setCalibration(imp.getCalibration().copy())
                    currentslice.setProperty("Info", imp.getProperty("Info"));
                    FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
                else:
                    registeredstack.addSlice("", ip)
    IJ.showProgress(1)
    if virtual is True:
        # Create virtual hyper stack
        registeredstack = VirtualStack(width, height, None, target_folder)
        for name in names:
            registeredstack.addSlice(name)
    registeredstack_imp = ImagePlus("registered time points", registeredstack)
    registeredstack_imp.setCalibration(imp.getCalibration().copy())
    registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
    # reorder the flat stack into a composite c/z/t hyperstack
    registeredstack_imp = HyperStackConverter.toHyperStack(registeredstack_imp, imp.getNChannels(), slices, imp.getNFrames(), "xyzct", "Composite");
    return registeredstack_imp
y_start = 0 if y_stop > bin*height: y_stop = bin*height start_time = time.time() title = image_in.getShortTitle() bit_depth = image_in.getBitDepth() options = NewImage.FILL_BLACK image_out = NewImage.createImage(title+'_corrected', width, height, 1, bit_depth, options) pixels_out = image_out.getProcessor().getPixels() image_noI = NewImage.createImage(title+'_corrected_noI', width, height, 1, bit_depth, options) pixels_noI = image_noI.getProcessor().getPixels() for x2 in range(y_start, y_stop): if x2%bin != 0: continue IJ.showProgress(x2 - y_start, y_stop - y_start) start_time_row = time.time() for x1 in range(0, bin*width): if x1%bin != 0: continue pixels_out[x2/bin*width+x1/bin] = get_corrected_intensity(x1, x2) pixels_noI[x1/bin + x2/bin*width] = pixels_in[int(round(calc_y1(x1, x2)/bin)) + int(round(calc_y2(x1, x2)/bin))*width] print "Calculation of row", x2/bin, ":", int(round(time.time() - start_time_row)),"s" duration = time.time() - start_time print 'This calculation took',int(round(duration)),'s (',int(round(width*height/duration)),'px/s )' IJ.showProgress(1.0) image_out.show() image_noI.show()
def compute_and_update_frame_translations_dt(imp, channel, method, bg_level, dt, process, roiz, shifts = None): """ imp contains a hyper virtual stack, and we want to compute the X,Y,Z translation between every t and t+dt time points in it using the given preferred channel. if shifts were already determined at other (lower) dt they will be used and updated. """ nt = imp.getNFrames() # get roi (could be None) roi = imp.getRoi() # init shifts if shifts == None: shifts = [] for t in range(nt): shifts.append(Point3f(0,0,0)) # compute shifts IJ.showProgress(0) for t in range(dt, nt+dt, dt): if t > nt-1: # together with above range till nt+dt this ensures that the last data points are not missed out t = nt-1 # nt-1 is the last shift (0-based) IJ.log(" between frames "+str(t-dt+1)+" and "+str(t+1)) # get (cropped and processed) image at t-dt start_time = time.time() roi1, roiz1 = shift_roi(imp, roi, roiz, shifts[t-dt]) imp1 = extract_frame_process_roi(imp, t+1-dt, channel, process, roi1, roiz1) # get (cropped and processed) image at t roi2, roiz2 = shift_roi(imp, roi, roiz, shifts[t]) imp2 = extract_frame_process_roi(imp, t+1, channel, process, roi2, roiz2) print(" prepared images in [s]: "+str(round(time.time()-start_time,3))) if roi: print " ROI at frame",t-dt+1,"is", roi1.getBounds().x, roi1.getBounds().y, roiz1.z print " ROI at frame",t+1,"is", roi2.getBounds().x, roi2.getBounds().y, roiz2.z # compute shift start_time = time.time() if (method == 1): local_new_shift = compute_shift_using_phasecorrelation(imp1, imp2, bg_level) elif (method == 2): local_new_shift = compute_shift_using_center_of_mass(imp1, imp2, bg_level) print(" computed shift in [s]: "+str(round(time.time()-start_time,3))) if roi: # total shift is shift of rois plus measured drift print " measured additional shift of",local_new_shift,"on top of roi shift:",shift_between_rois(roi2, roiz2, roi1, roiz1) local_new_shift = add_Point3f(local_new_shift, shift_between_rois(roi2, roiz2, roi1, roiz1)) print(" 
total local shift: "+str(round(local_new_shift.x,3))+" "+str(round(local_new_shift.y,3))+" "+str(round(local_new_shift.z,3))) # determine the shift that we knew alrady local_shift = subtract_Point3f(shifts[t],shifts[t-dt]) # compute difference between new and old measurement (which come from different dt) add_shift = subtract_Point3f(local_new_shift,local_shift) #print "++ old shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),local_shift.x,local_shift.y,local_shift.z) #print "++ add shift between %s and %s: dx=%s, dy=%s, dz=%s" % (int(t-dt+1),int(t+1),add_shift.x,add_shift.y,add_shift.z) # update shifts from t-dt to the end (assuming that the measured local shift will presist till the end) for i,tt in enumerate(range(t-dt,nt)): # for i>dt below expression basically is a linear drift predicition for the frames at tt>t # this is only important for predicting the best shift of the ROI # the drifts for i>dt will be corrected by the next measurements shifts[tt].x += 1.0*i/dt * add_shift.x shifts[tt].y += 1.0*i/dt * add_shift.y shifts[tt].z += 1.0*i/dt * add_shift.z # print "updated shift till frame",tt+1,"is",shifts[tt].x,shifts[tt].y,shifts[tt].z IJ.showProgress(1.0*t/(nt+1)) IJ.showProgress(1) return shifts
def process_time_points(root, files, outdir): '''Concatenate images and write ome.tiff file. If image contains already multiple time points just copy the image''' concat = 1 files.sort() options = ImporterOptions() options.setId(files[0]) options.setVirtual(1) image = BF.openImagePlus(options) image = image[0] if image.getNFrames() > 1: IJ.log(files[0] + " Contains multiple time points. Can only concatenate single time points! Don't do anything!") image.close() return width = image.getWidth() height = image.getHeight() for patt in pattern: outName = re.match(patt, os.path.basename(files[0])) if outName is None: continue if outdir is None: outfile = os.path.join(root, outName.group(1) + '.ome.tif') else: outfile = os.path.join(outdir, outName.group(1) + '.ome.tif') reader = ImageReader() reader.setMetadataStore(MetadataTools.createOMEXMLMetadata()) reader.setId(files[0]) timeInfo = [] omeOut = reader.getMetadataStore() omeOut = setUpXml(omeOut, image, files) reader.close() image.close() IJ.log ('Concatenates ' + os.path.join(root, outName.group(1) + '.ome.tif')) itime = 0 try: for ifile, fileName in enumerate(files): print fileName omeMeta = MetadataTools.createOMEXMLMetadata() reader.setMetadataStore(omeMeta) reader.setId(fileName) #print omeMeta.getPlaneDeltaT(0,0) #print omeMeta.getPixelsTimeIncrement(0) if fileName.endswith('.czi'): if ifile == 0: T0 = omeMeta.getPlaneDeltaT(0,0).value() dT = omeMeta.getPlaneDeltaT(0,0).value() - T0 unit = omeMeta.getPlaneDeltaT(0,0).unit() else: timeInfo.append(getTimePoint(reader, omeMeta)) unit = omeMeta.getPixelsTimeIncrement(0).unit() try: dT = round(timeInfo[files.index(fileName)]-timeInfo[0],2) except: dT = (timeInfo[files.index(fileName)]-timeInfo[0]).seconds nrImages = reader.getImageCount() for i in range(0, reader.getImageCount()): try: omeOut.setPlaneDeltaT(dT, 0, i + itime*nrImages) except TypeError: omeOut.setPlaneDeltaT(Time(dT, unit),0, i + itime*nrImages) omeOut.setPlanePositionX(omeOut.getPlanePositionX(0,i), 
0, i + itime*nrImages) omeOut.setPlanePositionY(omeOut.getPlanePositionY(0,i), 0, i + itime*nrImages) omeOut.setPlanePositionZ(omeOut.getPlanePositionZ(0,i), 0, i + itime*nrImages) omeOut.setPlaneTheC(omeOut.getPlaneTheC(0,i), 0, i + itime*nrImages) omeOut.setPlaneTheT(NonNegativeInteger(itime), 0, i + itime*nrImages) omeOut.setPlaneTheZ(omeOut.getPlaneTheZ(0,i), 0, i + itime*nrImages) itime = itime + 1 reader.close() IJ.showProgress(files.index(fileName), len(files)) try: incr = float(dT/(len(files)-1)) except: incr = 0 try: omeOut.setPixelsTimeIncrement(incr, 0) except TypeError: #new Bioformats >5.1.x omeOut.setPixelsTimeIncrement(Time(incr, unit),0) outfile = concatenateImagePlus(files, outfile) if outfile is not None: filein = RandomAccessInputStream(outfile) fileout = RandomAccessOutputStream(outfile) saver = TiffSaver(fileout, outfile) saver.overwriteComment(filein,omeOut.dumpXML()) fileout.close() filein.close() except: traceback.print_exc() finally: #close all possible open files try: reader.close() except: pass try: filein.close() except: pass try: fileout.close() except:
def register_hyperstack_subpixel(imp, channel, shifts, target_folder, virtual):
    """ Takes the imp, determines the x,y,z drift for each pair of time points, using the preferred given channel,
    and outputs as a hyperstack.
    The shifted image is computed using TransformJ allowing for sub-pixel shifts using interpolation.
    This is quite a bit slower than just shifting the image by full pixels as done in above function register_hyperstack().
    However it significantly improves the result by removing pixel jitter.

    imp           -- hyperstack to register (ImagePlus).
    shifts        -- list of per-frame drift vectors (x/y/z attributes);
                     modified in place: made relative to the new canvas origin.
    target_folder -- folder receiving per-slice tiffs when virtual is True.
    virtual       -- when True, the result is a file-backed VirtualStack.

    Returns the registered hyperstack as a new ImagePlus.
    NOTE(review): `channel` is unused here -- the shifts are precomputed;
    presumably it is kept for signature symmetry with register_hyperstack().
    """
    # Compute bounds of the new volume,
    # which accounts for all translations:
    minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
    # Make shifts relative to new canvas dimensions
    # so that the min values become 0,0,0
    for shift in shifts:
        shift.x -= minx
        shift.y -= miny
        shift.z -= minz
    # new canvas dimensions:
    width = int(imp.width + maxx - minx)
    height = int(maxy - miny + imp.height)
    slices = int(maxz - minz + imp.getNSlices())
    #print "New dimensions:", width, height, slices
    # prepare stack for final results
    stack = imp.getStack()
    if virtual is True:
        names = []  # per-slice tiff file names, in stack order
    else:
        registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())
    # prepare empty slice for padding
    empty = imp.getProcessor().createProcessor(width, height)
    IJ.showProgress(0)
    # get raw data as stack
    stack = imp.getStack()
    # loop across frames
    for frame in range(1, imp.getNFrames()+1):
        IJ.showProgress(frame / float(imp.getNFrames()+1))
        fr = "t" + zero_pad(frame, len(str(imp.getNFrames())))  # for saving files in a virtual stack
        # get and report current shift
        shift = shifts[frame-1]
        #print "frame",frame,"correcting drift",-shift.x-minx,-shift.y-miny,-shift.z-minz
        IJ.log(" frame "+str(frame)+" correcting drift "+str(round(-shift.x-minx,2))+","+str(round(-shift.y-miny,2))+","+str(round(-shift.z-minz,2)))
        # loop across channels
        for ch in range(1, imp.getNChannels()+1):
            tmpstack = ImageStack(width, height, imp.getProcessor().getColorModel())
            # get all slices of this channel and frame
            for s in range(1, imp.getNSlices()+1):
                ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
                ip2 = ip.createProcessor(width, height)  # potentially larger
                ip2.insert(ip, 0, 0)
                tmpstack.addSlice("", ip2)
            # Pad the end (in z) of this channel and frame
            for s in range(imp.getNSlices(), slices):
                tmpstack.addSlice("", empty)
            # subpixel translation
            imp_tmpstack = ImagePlus("", tmpstack)
            imp_translated = translate_single_stack_using_imglib2(imp_tmpstack, shift.x, shift.y, shift.z)
            # add translated stack to final time-series
            translated_stack = imp_translated.getStack()
            for s in range(1, translated_stack.getSize()+1):
                ss = "_z" + zero_pad(s, len(str(slices)))
                # duplicate is important as otherwise it will only be a reference that can change its content
                ip = translated_stack.getProcessor(s).duplicate()
                if virtual is True:
                    name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
                    names.append(name)
                    currentslice = ImagePlus("", ip)
                    currentslice.setCalibration(imp.getCalibration().copy())
                    currentslice.setProperty("Info", imp.getProperty("Info"));
                    FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
                else:
                    registeredstack.addSlice("", ip)
    IJ.showProgress(1)
    if virtual is True:
        # Create virtual hyper stack
        registeredstack = VirtualStack(width, height, None, target_folder)
        for name in names:
            registeredstack.addSlice(name)
    registeredstack_imp = ImagePlus("registered time points", registeredstack)
    registeredstack_imp.setCalibration(imp.getCalibration().copy())
    registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
    registeredstack_imp = HyperStackConverter.toHyperStack(registeredstack_imp, imp.getNChannels(), slices, imp.getNFrames(), "xyzct", "Composite");
    return registeredstack_imp
def process(self, tree):
    """Persist the given tree and advance the shared progress bar.

    Writes *tree* to self.targetFolder via toFiles(), then reports
    progress using the shared atomic counter self.ai against self.total.
    """
    destination = self.targetFolder
    toFiles(tree, destination)
    completed = self.ai.incrementAndGet()
    IJ.showProgress(completed, self.total)
def __settings(self, imgName) :
    """ Lets the user to choose different measures to make, and displays it following the choice of the user.

    imgName -- key of the image in self.__dictCells (with or without a
               4-character extension; both are tried).

    Returns False when the analysis should be re-run on another stack (or
    could not run at all), True when the user is done.
    """
    # Resolve the per-cell dictionary for this image; fall back to the name
    # without its last 4 characters (presumably a ".tif"-style extension).
    try :
        dico=self.__dictCells[imgName]
    except KeyError :
        try :
            dico=self.__dictCells[imgName[:-4]]
        except KeyError :
            return False
        else :
            imgName=imgName[:-4]
            dico=self.__dictCells[imgName]
    # Fresh measure container for every cell of this image.
    for cellname in dico.keys() :
        self.__dictMeasures[dico[cellname]]={}

    # Represents the datas on a diagram
    def diagrambuttonPressed(event) :
        IJ.showMessage("Push 'Auto' button each time you want to see the diagram")
        # Layout constants of the diagram grid (pixels).
        x1=10
        y1=20
        x2=100
        y2=50
        x3=60
        y3=30
        xr=10
        yr=20
        wr=20
        hr=20
        rect=Rectangle(xr,yr,wr,hr)
        #img=IJ.getImage()
        #nbslices=self.__img.getImageStackSize()
        nbslices=self.__maxLife
        IJ.run("Hyperstack...", "title=Diagram type=32-bit display=Color width="+str(x2+(nbslices+1)*x3)+" height="+str(y2+y3*len(dico))+" channels=1 slices="+str(len(self.__measures))+" frames=1")
        im=IJ.getImage()
        ip=im.getProcessor()
        # One hyperstack slice per measure; one row per cell, one column per frame.
        for i in range(len(self.__measures)) :
            indiceligne=0
            maxvalue=0
            minvalue=1000000
            im.setPosition(1,i+1,1)
            for cellname in self.__listcellname :
                indiceligne+=1
                for indicecolonne in range(1,nbslices+1):
                    rect.setLocation(x2+indicecolonne*x3+int(x3/6),(y1+indiceligne*y3-int(y3/2)))
                    # we create at the first iteration a dictionary with the rectangles (for a future use)
                    if i==0 :
                        self.__gridrectangle[(indiceligne,indicecolonne)]=Rectangle(rect)
                    im.setRoi(rect)
                    ipr=im.getProcessor()
                    # we find the min and max values of the datas for a measure given.
                    if self.__dictMeasures[dico[cellname]][self.__measures[i]][indicecolonne-1]>maxvalue :
                        maxvalue=self.__dictMeasures[dico[cellname]][self.__measures[i]][indicecolonne-1]
                    if self.__dictMeasures[dico[cellname]][self.__measures[i]][indicecolonne-1]<minvalue :
                        minvalue=self.__dictMeasures[dico[cellname]][self.__measures[i]][indicecolonne-1]
                    # we fill the rectangle with the value of the measure
                    ipr.setValue(self.__dictMeasures[dico[cellname]][self.__measures[i]][indicecolonne-1])
                    ipr.fill()
            # we write the names and the n of slices on the image with the maxvalue.
            ip.setValue(maxvalue)
            ip.moveTo(x1,y1)
            ip.drawString(self.__measures[i])
            for j in range(1,nbslices+1) :
                ip.moveTo(x2+j*x3,y1)
                ip.drawString("Slice "+str(j))
            j=0
            for cellname in self.__listcellname :
                ip.moveTo(x1,y2+j*y3)
                ip.drawString(cellname)
                j+=1
        im.killRoi()
        im=IJ.run(im,"Fire","")
        IJ.run("Brightness/Contrast...", "")
        #im.setMinAndMax(minvalue,maxvalue)
        #im.updateImage()
        # we add a mouse listener in order to be able to show the roi corresponding to a rectangle when the user clicks on it.
        listener = ML()
        listener.name=imgName
        for imp in map(WindowManager.getImage, WindowManager.getIDList()):
            if imp.getTitle().startswith("Diagram") :
                win = imp.getWindow()
                if win is None: continue
                win.getCanvas().addMouseListener(listener)

    # Represents the datas on a series of graphs.
    def graphbuttonPressed(event) :
        colors=[]
        #img=IJ.getImage()
        #nbslices=self.__img.getImageStackSize()
        nbslices=self.__maxLife
        acell=dico.values()[0]
        # x axis: either real acquisition times or plain frame numbers.
        if self.__useTime :
            x = acell.getListTimes()
            namex="Time sec"
        else :
            x = range(1,nbslices+1)
            namex = "Frame"
        maxx=max(x)
        minx=min(x)
        #x=[i for i in range(1,nbslices+1)]
        font=Font("new", Font.BOLD, 14)
        tempname = WindowManager.getUniqueName(self.__img.getShortTitle())
        for i in range(len(self.__measures)) :
            #print "i", i, self.__measures[i]
            yarray=[]
            flag=True
            miny=10000000000
            maxy=-1000000000
            # we find the min and max values in order to set the scale.
            for cellname in self.__listcellname :
                colors.append(dico[cellname].getColor())
                yarray.append(self.__dictMeasures[dico[cellname]][self.__measures[i]])
                #for meas in self.__dictMeasures[dico[cellname]][self.__measures[i]] :
                for meas in yarray[-1] :
                    if (meas<miny) and (Double.isNaN(meas)==False) :
                        miny=meas
                if max(yarray[-1])>maxy :
                    maxy=max(yarray[-1])
            # 10% margin around the data range.
            miny-=0.1*miny
            maxy+=0.1*maxy
            count=0.05
            # First cell creates the Plot; the others are added as extra point sets.
            for j in range(len(yarray)) :
                if j==0 :
                    if len(self.__measures)>1 :
                        plot=Plot("Plots-"+str(self.__measures[i]),namex,str(self.__measures[i]),x,yarray[j])
                    else :
                        plot=Plot("Plot-"+tempname,namex,str(self.__measures[i]),x,yarray[j])
                    plot.setLimits(minx,maxx,miny,maxy)
                    plot.setColor(colors[j])
                    plot.changeFont(font)
                    plot.addLabel(0.05, count, self.__listcellname[j])
                else :
                    plot.setColor(colors[j])
                    plot.setLineWidth(3)
                    plot.addPoints(x,yarray[j],Plot.LINE)
                    plot.addLabel(0.05, count, self.__listcellname[j])
                count+=0.05
            plot.setColor(colors[0])
            plot.show()
        if len(self.__measures)>1 :
            IJ.run("Images to Stack", "name="+tempname+"-plots title=Plots- use")

    #def histbuttonPressed(event) :
    #
    #	pass

    # Represents the values in a tab.
    def tabbuttonPressed(event) :
        tab="\t"
        headings=[]
        measures=[]
        #img=IJ.getImage()
        #for i in range(self.__img.getImageStackSize()+1) :
        for i in range(self.__maxLife+1) :
            headings.append("Slice "+str(i))
        headings[0]=WindowManager.getUniqueName(self.__img.getShortTitle())
        # One TextWindow per measure: a header row plus one line per cell.
        #for m in self.__measurescompl :
        for m in self.__dictMeasures[dico[self.__listcellname[0]]].keys() :
            headstring=""
            for head in headings:
                headstring+=head+tab
            tw=TextWindow(self.__listfiles[0]+"-"+m,headstring,"",800,600)
            tp=tw.getTextPanel()
            #for cellname in dico.keys() :
            for cellname in self.__listcellname :
                line=[]
                line=[str(meas)+tab for meas in self.__dictMeasures[dico[cellname]][m]]
                line.insert(0, cellname+tab)
                linestr=""
                for s in line:
                    linestr+=s
                tp.appendLine(linestr)
            tp.updateDisplay()
        # Latency is a global (per-cell scalar) measure and gets its own window.
        if self.__measuresparambool_global[0] :
            tw=TextWindow("Latency","cell\tLatency", "",800,600)
            tp=tw.getTextPanel()
            for i in range(len(self.__listcellname)) :
                #if latencies[i][0] : line=self.__listcellname[i]+"\t"+str(latencies[i][1])
                #else : line=self.__listcellname[i]+"\t"+"NaN"
                line=self.__listcellname[i]+"\t"+str(latencies[i][1])
                tp.appendLine(line)
            tp.updateDisplay()

    def helpbuttonPressed(event) :
        IJ.showMessage("TO DO")

    def newsetPressed(event) :
        gd0.dispose()
        self.__settings()

    def alignbuttonPressed(event) :
        IJ.showMessage("TO DO")

    def mergebuttonPressed(event) :
        IJ.showMessage("TO DO")

    # Append the measures of this image to per-measure text files under Results/.
    def saveResults() :
        #if len(self.__listcellname) == 0 :
        nbslices=self.__maxLife
        acell=dico.values()[0]
        if self.__useTime :
            x = acell.getListTimes()
            namex="Time_sec"
        else :
            x = range(1,nbslices+1)
            namex = "Frame"
        if not path.exists(self.__rootpath+"Results"+os.path.sep) :
            os.makedirs(self.__rootpath+os.path.sep+"Results"+os.path.sep, mode=0777)
        tab="\t"
        nl="\n"
        measures=[]
        headstring=""
        #if self.__savemode : mode = "a"
        #else : mode ="w"
        mode = "a"
        #for i in range(1, self.__maxLife+1) :headstring += "Slice_"+str(i)+tab
        for i in range(self.__maxLife) :
            headstring += str(x[i])+tab
        #for m in self.__measurescompl :
        for m in self.__dictMeasures[dico[self.__listcellname[0]]].keys() :
            f = open(self.__rootpath+"Results"+os.path.sep+m+".txt", mode)
            #f.write(m+nl)
            f.write(imgName+"-"+self.__time+"-"+m+"-"+namex+tab+headstring+nl)
            if len(self.__listcellname) == 0 :
                f.write("no cells")
            else :
                for cellname in self.__listcellname :
                    linestr=cellname+tab
                    for measure in self.__dictMeasures[dico[cellname]][m] :
                        #print m, cellname, measure
                        linestr += str(measure)+tab
                    linestr+=nl
                    f.write(linestr)
            f.close()
        if self.__measuresparambool_global[0] :
            m = "Latency"
            f = open(self.__rootpath+"Results"+os.path.sep+m+".txt", mode)
            f.write(imgName+"-"+self.__time+"-"+m+nl)
            for i in range(len(self.__listcellname)) :
                #if latencies[i][0] : line=self.__listcellname[i]+"\t"+str(latencies[i][1])
                #else : line=self.__listcellname[i]+"\t"+"NaN"
                line=self.__listcellname[i]+"\t"+str(latencies[i][1])
                line+=nl
                f.write(line)
            f.close()

    #
    # ----------- main measures dialog -------------------------
    #
    # Allows the user to choose the measures to make, etc..
    measureslabels_indep=["MaxFeret","MinFeret","AngleFeret","XFeret","YFeret","Area","Angle","Major","Minor","Solidity","AR","Round","Circ","XC","YC","FerCoord","FerAxis","MidAxis"]
    measureslabels_dep=["Mean","StdDev","IntDen","Kurt","Skew","XM","YM","Fprofil","MidProfil","NFoci","ListFoci","ListAreaFoci","ListPeaksFoci","ListMeanFoci"]
    measureslabels_global=["Latency", "velocity", "cumulatedDist"]
    # Measures that only make sense in the tab view (lists/profiles, not scalars).
    measureslabels_dep_tabonly=set(["MidAxis","FerCoord","FerAxis","Fprofil","MidProfil","ListFoci","ListAreaFoci","ListPeaksFoci","ListMeanFoci"])
    ens_measures_global=set(measureslabels_global)
    ens_measures_indep=set(measureslabels_indep)
    ens_measures_dep=set(measureslabels_dep)
    measureslabels=[]
    for label in measureslabels_indep :
        measureslabels.append(label)
    for label in measureslabels_dep :
        measureslabels.append(label)
    #self.__defaultmeasures=[False for i in range(len(measureslabels))]
    #self.__defaultmeasures_global=[False for i in range(len(measureslabels_global))]
    gdmeasures=NonBlockingGenericDialog("MeasuresChoice")
    gdmeasures.setFont(Font("Courrier", 1, 10))
    gdmeasures.addMessage("******* TIME SETTINGS *******")
    gdmeasures.addCheckbox("Only starting at begining :", self.__onlystart)		# 1 only start
    gdmeasures.addNumericField("Minimal Lifetime : ",self.__minLife,0)
    gdmeasures.addNumericField("Maximal Lifetime : ",self.__maxLife,0)
    #gdmeasures.addNumericField("Maximal Lifetime : ",self.__img.getImageStackSize(),0)
    gdmeasures.addCheckbox("x axis in seconds", self.__useTime)		# 2 use time
    gdmeasures.addMessage("")
    gdmeasures.addMessage("")
    gdmeasures.addMessage("Choose the measures to make on the cells : ")
    gdmeasures.addMessage("******* TIME MEASURES *******")
    gdmeasures.addCheckboxGroup(4,8,measureslabels,self.__defaultmeasures)
    gdmeasures.addMessage("")
    gdmeasures.addMessage("******* GLOBAL MEASURES *******")
    gdmeasures.addMessage("PLEASE : If you have selected movement parameters you MUST to select XC and YC !")
    gdmeasures.addCheckboxGroup(3,1,measureslabels_global,self.__defaultmeasures_global)
    gdmeasures.addNumericField("Noise value for maxima finder: ",self.__noise,0)
    gdmeasures.addMessage("")
    gdmeasures.addMessage("******* OPTIONS *******")
    gdmeasures.addCheckbox("Select the cells in next dialog ?", self.__onlyselect)		# 3 only select
    gdmeasures.addCheckbox("Save results to text files ?", self.__savetables)		# 4 save files
    #gdmeasures.addCheckbox("Append mode ?", self.__savemode)		# 5 append mode
    gdmeasures.addCheckbox("Analyse in batch mode ?", self.__batchanalyse)		# 6 analysis batch mode
    gdmeasures.addCheckbox("Update overlay ?", self.__updateoverlay)		# 7 update overlay
    gdmeasures.addMessage("")
    gdmeasures.addMessage("")
    help_panel=Panel()
    helpbutton=Button("HELP")
    helpbutton.actionPerformed = helpbuttonPressed
    help_panel.add(helpbutton)
    gdmeasures.addPanel(help_panel)
    gdmeasures.hideCancelButton()
    # NOTE(review): in batch mode the dialog is skipped and the previously
    # stored settings are reused -- confirm the intended scope of this guard,
    # the original indentation was lost.
    if not self.__batchanalyse :
        gdmeasures.showDialog()
        self.__onlystart=gdmeasures.getNextBoolean()		# 1 only start
        self.__minLife=gdmeasures.getNextNumber()
        self.__maxLife=gdmeasures.getNextNumber()
        self.__useTime=gdmeasures.getNextBoolean()		# 2 use time
        self.__measuresparambool=[]
        self.__measuresparambool_global=[]
        for i in range(len(measureslabels)) :
            self.__measuresparambool.append(gdmeasures.getNextBoolean())
            self.__defaultmeasures[i]=self.__measuresparambool[-1]
        for i in range(len(measureslabels_global)) :
            self.__measuresparambool_global.append(gdmeasures.getNextBoolean())
            self.__defaultmeasures_global[i] = self.__measuresparambool_global[i]
        self.__noise=gdmeasures.getNextNumber()
        self.__onlyselect=gdmeasures.getNextBoolean()		# 3 only select
        self.__savetables = gdmeasures.getNextBoolean()		# 4 save files
        #self.__savemode = gdmeasures.getNextBoolean()		# 5 append mode
        self.__batchanalyse = gdmeasures.getNextBoolean()	# 6 analyse mode
        self.__updateoverlay = gdmeasures.getNextBoolean()	# 7 update overlay
    # we update a list of all cells that have a lifetime corresponding to what the user chose.
    if len (self.__allcells) == 0 :
        for cellname in dico.keys() :
            if dico[cellname].getLifeTime()>=self.__minLife : #and dico[cellname].getLifeTime()<=self.__maxLife :
                if self.__onlystart :
                    if dico[cellname].getSlideInit()<2 :
                        self.__allcells.append(cellname)
                else :
                    self.__allcells.append(cellname)
    if self.__noise == 0 :
        self.__noise = None
    if self.__batchanalyse :
        self.__onlyselect = False
    if self.__onlyselect :
        # Interactive cell selection: reuse the existing CellsSelection window
        # when it matches this image, otherwise (re)create it.
        try :
            self.__gw
        except AttributeError :
            if not path.exists(self.__pathdir+"Selected-Cells"+os.path.sep) :
                os.makedirs(self.__pathdir+os.path.sep+"Selected-Cells"+os.path.sep, mode=0777)
            self.__gw = CellsSelection()
            self.__gw.setTitle(imgName)
            self.__gw.run(self.__allcells, self.__pathdir+"ROIs"+os.path.sep)
            self.__gw.show()
            self.__gw.setSelected(self.__allcells)
            # NOTE(review): busy-wait until the user presses OK.
            while not self.__gw.oked and self.__gw.isShowing() :
                self.__gw.setLabel("Validate selection with OK !!")
            self.__listcellname = list(self.__gw.getSelected())
            self.__gw.resetok()
            self.__gw.setLabel("...")
            self.__gw.hide()
        else :
            if self.__gw.getTitle() == imgName :
                self.__gw.show()
                self.__gw.setSelected(self.__listcellname)
                self.__listcellname[:]=[]
                while not self.__gw.oked and self.__gw.isShowing() :
                    self.__gw.setLabel("Validate selection with OK !!")
                self.__listcellname = list(self.__gw.getSelected())
                self.__gw.resetok()
                self.__gw.setLabel("...")
                self.__gw.hide()
            else :
                self.__gw.dispose()
                if not path.exists(self.__pathdir+"Selected-Cells"+os.path.sep) :
                    os.makedirs(self.__pathdir+os.path.sep+"Selected-Cells"+os.path.sep, mode=0777)
                self.__gw = CellsSelection()
                self.__gw.setTitle(imgName)
                self.__gw.run(self.__allcells, self.__pathdir+"ROIs"+os.path.sep)
                self.__gw.show()
                self.__gw.setSelected(self.__allcells)
                self.__listcellname[:]=[]
                while not self.__gw.oked and self.__gw.isShowing() :
                    self.__gw.setLabel("Validate selection with OK !!")
                self.__listcellname = list(self.__gw.getSelected())
                self.__gw.resetok()
                self.__gw.setLabel("...")
                self.__gw.hide()
        # Mirror the selected .cell files into Selected-Cells/.
        filestodelet=glob.glob(self.__pathdir+"Selected-Cells"+os.path.sep+"*.cell")
        for f in filestodelet :
            os.remove(f)
        for cell in self.__listcellname :
            sourcestr = self.__pathdir+"Cells"+os.path.sep+cell+".cell"
            deststr = self.__pathdir+"Selected-Cells"+os.path.sep+cell+".cell"
            #os.system("copy "+sourcestr+", "+deststr)
            #shutil.copy(self.__pathdir+"Cells"+os.path.sep+cell+".cell",self.__pathdir+"Selected-Cells"+os.path.sep+cell+".cell")
            shutil.copy(sourcestr,deststr)
        self.__dictNcells[imgName] = len(self.__listcellname)
    else :
        self.__listcellname = list(self.__allcells)
        self.__dictNcells[imgName] = len(self.__listcellname)
    if len(self.__listcellname) == 0 :
        self.__dictNcells[imgName] = 0
        return False
    self.__img.hide()
    # we make the measures.
    for i in range(len(measureslabels)) :
        IJ.showProgress(i, len(measureslabels))
        if self.__measuresparambool[i]==True :
            self.__measurescompl.append(measureslabels[i])
            if (measureslabels[i] in measureslabels_dep_tabonly)==False :
                self.__measures.append(measureslabels[i])
            # indices < 18 are the shape (ROI-independent) measures, the rest
            # are intensity-dependent ones.
            if (i<18) and (measureslabels[i] in ens_measures_indep) :
                self.__measureAll(self.__img,measureslabels[i],False, imgName, self.__noise)
                ens_measures_indep.discard(measureslabels[i])
            if i>=18 :
                self.__measureAll(self.__img,measureslabels[i],True, imgName, self.__noise)
    if self.__measuresparambool_global[0] :
        # calculate latency
        latencies=[]
        for i in range(len(self.__listcellname)) :
            IJ.showProgress(i, len(self.__listcellname))
            latencies.append(self.latencie(self.__listcellname[i], self.__img, imgName, self.__useTime))
    if self.__measuresparambool_global[1] :
        # calculate velocity
        self.__measures.append("velocity")
        #velocities=[]
        for i in range(len(self.__listcellname)) :
            IJ.showProgress(i, len(self.__listcellname))
            self.__measureVelocity(self.__img,imgName)
    if self.__measuresparambool_global[2] :
        # calculate cumulatedDistance
        self.__measures.append("cumulatedDist")
        #velocities=[]
        for i in range(len(self.__listcellname)) :
            IJ.showProgress(i, len(self.__listcellname))
            self.__measurecumulDist(self.__img,imgName)
    self.__img.show()
    self.__img.getProcessor().resetThreshold()
    # Optionally rebuild the image overlay from each cell's ROIs via the ROI Manager.
    if self.__updateoverlay :
        if self.__img.getOverlay() is not None :
            self.__img.getOverlay().clear()
        outputrois=[]
        cellnames=[]
        self.__img.hide()
        for cellname in self.__listcellname :
            for r in dico[cellname].getListRoi():
                if isinstance(r,Roi) :
                    pos=r.getPosition()
                    #print "MC overlay", cellname, r.getName(), pos
                    #r.setPosition(0)
                    #overlay.add(r)
                    outputrois.append(r)
                    if "cell" in r.getName() :
                        cellnames.append(r.getName())
                    else :
                        cellnames.append(str(pos)+"-"+cellname)
                    #print cellnames[-1]
        rm = RoiManager.getInstance()
        if (rm==None):
            rm = RoiManager()
        rm.show()
        self.__img.show()
        IJ.selectWindow(self.__img.getTitle())
        rm.runCommand("reset")
        for i in range(len(outputrois)) :
            outputrois[i].setName(cellnames[i])
            rm.addRoi(outputrois[i])
            rm.select(rm.getCount()-1)
            rm.runCommand("Rename", cellnames[i])
        IJ.run("Show Overlay", "")
        rm.runCommand("UseNames", "true")
        rm.runCommand("Associate", "true")
        IJ.run(self.__img, "Labels...", "color=red font=12 show use")
        IJ.run(self.__img, "From ROI Manager", "")
        rm.runCommand("Show None")
        rm.runCommand("Show All")
    # ----------- batch analyse ------------------------
    if self.__batchanalyse :
        if self.__savetables :
            saveResults()
        self.__dictMeasures.clear()
        self.__allcells[:]=[]
        self.__measurescompl[:]=[]
        self.__measures[:]=[]
        return False
    # ---------- display methodes dialog ----------------
    # Allows the user to choose how to see the results of the measures.
    gd0=NonBlockingGenericDialog("Display")
    gd0.addMessage("How do you want to see the results ?")
    panel0=Panel()
    diagrambutton=Button("Diagram")
    diagrambutton.actionPerformed = diagrambuttonPressed
    panel0.add(diagrambutton)
    graphbutton=Button("Graph")
    graphbutton.actionPerformed = graphbuttonPressed
    panel0.add(graphbutton)
    tabbutton=Button("Tab")
    tabbutton.actionPerformed = tabbuttonPressed
    panel0.add(tabbutton)
    gd0.addPanel(panel0)
    gd0.addCheckbox("Analyse next stack ?", self.__nextstack)
    gd0.hideCancelButton()
    gd0.showDialog()
    self.__nextstack = gd0.getNextBoolean()
    # ---------- save tables ---------------------------
    if self.__savetables :
        saveResults()
    # --------- re-start analysis -------------------
    self.__dictMeasures.clear()
    #self.__listcellname[:]=[]
    self.__allcells[:]=[]
    self.__measurescompl[:]=[]
    self.__measures[:]=[]
    if self.__nextstack :
        return False
    else :
        return True
def regBf(fn=None, imp=None, refId=None):
    """
    Register a time series stack to a specified reference slice,
    from a file (imported by BioFormat) or a stack ImagePlus.
    Returns a registered ImagePlus.
    The stack must have only 1 z layer.
    refId is in the format of [int channel, int slice, int frame]
    If no refId is supplied, will use the first slice [1,1,1]

    Note: since TurboReg is used for registeration, there will be
    temporary opened image windows.
    """

    ## Prepare the right ImagePlus
    # Exactly one of `fn` / `imp` may be provided; with neither, an
    # OpenDialog asks the user for a file.
    if imp is None:
        if fn is None:
            od = OpenDialog("Choose a file", None)
            filename = od.getFileName()
            if filename is None:
                # user pressed Cancel: bail out silently (returns None)
                print "User canceled the dialog!"
                return
            else:
                directory = od.getDirectory()
                filepath = directory + filename
                print "Selected file path:", filepath
        else:
            if os.path.exists(fn) and os.path.isfile(fn):
                filepath = fn
            else:
                print "File does not exist!"
                return
        # Bio-Formats may return several series; only the first is used.
        imps = BF.openImagePlus(filepath)
        imp = imps[0]
        if imp is None:
            print "Cannot load file!"
            return
    else:
        if fn is not None:
            print "File or ImagePlus? Cannot load both."
            return

    width = imp.getWidth()
    height = imp.getHeight()
    # C
    nChannels = imp.getNChannels()
    # Z
    nSlices = imp.getNSlices()
    # T
    nFrames = imp.getNFrames()
    # pixel size
    calibration = imp.getCalibration()

    # Only supoort one z layer
    if nSlices != 1:
        print "Only support 1 slice at Z dimension."
        return

    # set registration reference slice (1-based C/Z/T coordinates)
    if refId is None:
        refC = 1
        refZ = 1
        refT = 1
    else:
        refC = refId[0]
        refZ = refId[1]
        refT = refId[2]
        # validate the user-supplied reference against the stack dimensions
        if (refC not in range(1, nChannels + 1) or
                refZ not in range(1, nSlices + 1) or
                refT not in range(1, nFrames + 1)):
            print "Invalid reference image!"
            return

    stack = imp.getImageStack()
    registeredStack = ImageStack(width, height, nChannels * nFrames * nSlices)

    # setup windows, these are needed by TurboReg
    # (TurboReg addresses its inputs by window title, so the three images
    # must exist as named, open windows for the duration of the loop)
    tmpip = FloatProcessor(width, height)
    refWin = ImageWindow(ImagePlus("ref", tmpip))
    bounds = refWin.getBounds()
    # refWin.setVisible(False)
    toRegWin = ImageWindow(ImagePlus("toReg", tmpip))
    toRegWin.setLocation(bounds.width + bounds.x, bounds.y)
    # toRegWin.setVisible(False)
    toTransformWin = ImageWindow(ImagePlus("toTransform", tmpip))
    toTransformWin.setLocation(2 * bounds.width + bounds.x, bounds.y)
    # toTransformWin.setVisible(False)

    # get reference image (fixed for the whole series)
    refImp = ImagePlus("ref",
                       stack.getProcessor(imp.getStackIndex(refC, refZ, refT)))
    refWin.setImage(refImp)

    tr = TurboReg_()

    for t in xrange(1, nFrames + 1):
        IJ.showProgress(t - 1, nFrames)
        # print "t ", t

        # do TurboReg on reference channel: align frame t of the reference
        # channel against the fixed reference image
        toRegId = imp.getStackIndex(refC, refZ, t)
        toRegImp = ImagePlus("toReg", stack.getProcessor(toRegId))
        toRegWin.setImage(toRegImp)
        # TurboReg macro string: "-align" with two windows (source, target),
        # a rigid-body model seeded with the image center plus two vertical
        # landmark pairs, output hidden.
        regArg = "-align " +\
                 "-window " + toRegImp.getTitle() + " " +\
                 "0 0 " + str(width - 1) + " " + str(height - 1) + " " +\
                 "-window " + refImp.getTitle() + " " +\
                 "0 0 " + str(width - 1) + " " + str(height - 1) + " " +\
                 "-rigidBody " +\
                 str(width / 2) + " " + str(height / 2) + " " +\
                 str(width / 2) + " " + str(height / 2) + " " +\
                 "0 " + str(height / 2) + " " +\
                 "0 " + str(height / 2) + " " +\
                 str(width - 1) + " " + str(height / 2) + " " +\
                 str(width - 1) + " " + str(height / 2) + " " +\
                 "-hideOutput"
        tr = TurboReg_()
        tr.run(regArg)
        registeredImp = tr.getTransformedImage()
        # landmark pairs recovered from the alignment; reused below to apply
        # the same rigid-body transform to the other channels of frame t
        sourcePoints = tr.getSourcePoints()
        targetPoints = tr.getTargetPoints()
        registeredStack.setProcessor(registeredImp.getProcessor(), toRegId)
        # toRegImp.flush()

        # apply transformation on other channels
        for c in xrange(1, nChannels + 1):
            # print "c ", c
            if c == refC:
                continue
            toTransformId = imp.getStackIndex(c, 1, t)
            toTransformImp = ImagePlus("toTransform",
                                       stack.getProcessor(toTransformId))
            toTransformWin.setImage(toTransformImp)
            # "-transform" reuses the three source/target landmark pairs
            # from the reference-channel alignment of this frame
            transformArg = "-transform " +\
                           "-window " + toTransformImp.getTitle() + " " +\
                           str(width) + " " + str(height) + " " +\
                           "-rigidBody " +\
                           str(sourcePoints[0][0]) + " " +\
                           str(sourcePoints[0][1]) + " " +\
                           str(targetPoints[0][0]) + " " +\
                           str(targetPoints[0][1]) + " " +\
                           str(sourcePoints[1][0]) + " " +\
                           str(sourcePoints[1][1]) + " " +\
                           str(targetPoints[1][0]) + " " +\
                           str(targetPoints[1][1]) + " " +\
                           str(sourcePoints[2][0]) + " " +\
                           str(sourcePoints[2][1]) + " " +\
                           str(targetPoints[2][0]) + " " +\
                           str(targetPoints[2][1]) + " " +\
                           "-hideOutput"
            tr = TurboReg_()
            tr.run(transformArg)
            registeredStack.setProcessor(
                tr.getTransformedImage().getProcessor(), toTransformId)
            # toTransformImp.flush()
        # drop the per-frame landmarks before the next iteration
        sourcePoints = None
        targetPoints = None
        IJ.showProgress(t, nFrames)
        IJ.showStatus("Frames registered: " + str(t) + "/" + str(nFrames))

    # close the temporary windows TurboReg needed
    refWin.close()
    toRegWin.close()
    toTransformWin.close()

    # assemble the output hyperstack, carrying over calibration, dimensions
    # and string properties from the input image
    imp2 = ImagePlus("reg_" + imp.getTitle(), registeredStack)
    imp2.setCalibration(imp.getCalibration().copy())
    imp2.setDimensions(nChannels, nSlices, nFrames)
    # print "type ", imp.getType()
    # print "type ", imp2.getType()
    # print nChannels, " ", nSlices, " ", nFrames
    # print registeredStack.getSize()
    for key in imp.getProperties().stringPropertyNames():
        imp2.setProperty(key, imp.getProperty(key))
    # comp = CompositeImage(imp2, CompositeImage.COLOR)
    # comp.show()
    # imp2 = imp.clone()
    # imp2.setStack(registeredStack)
    # imp2.setTitle("reg"+imp.getTitle())
    # imp2.show()
    # imp.show()
    return imp2
# progress bar
from ij import IJ

imp = IJ.getImage()
stack = imp.getImageStack()
# Hoist the loop-invariant size out of the loop; using it directly as the
# denominator (instead of size + 1) lets the bar actually reach 100% on the
# last slice.
nSlices = stack.getSize()
for i in xrange(1, nSlices + 1):
    # Report progress
    IJ.showProgress(i, nSlices)
    # Do some work on the current slice
    ip = stack.getProcessor(i)
# Done
IJ.showProgress(1)
# Accumulate the mean intensity of "bright" pixels (value > 500) over the
# whole stack after background subtraction, then binarize with that mean
# as the threshold.
# NOTE(review): `slices`, `stack`, `imp`, `TOPHAT` and `convertToBinaryMask`
# are defined earlier in this file (outside this chunk) — confirm there.
meanI = 0
dim = 0
print 'Applying top hat filter...'
for s in range( 0, slices ):
    # slice numbers are 1-based in ImageJ stacks
    ip = stack.getProcessor(s+1)
    # ip, double radius, boolean createBackground, boolean lightBackground, boolean useParaboloid,
    # boolean doPresmooth, boolean correctCorners
    TOPHAT().rollingBallBackground( ip, 20, False, False, False, False, True )
    # sum all pixel values above the fixed 500 cutoff; `dim` counts them
    for x in range( 0, imp.getWidth() ):
        for y in range( 0, imp.getHeight() ):
            val = ip.get( x, y )
            if val > 500:
                meanI = meanI + val
                dim = dim + 1
    IJ.showProgress(s+1, slices)
#dim = imp.getWidth()*imp.getHeight()*slices
# mean of the bright pixels becomes the threshold intensity
# NOTE(review): if no pixel exceeds 500, dim is 0 and this divides by zero
intens = float(meanI)/float(dim)
print 'Applying thresholding with intensity =', intens
newImp = convertToBinaryMask( imp, int(intens) )
#newImp.show()

# TODO
#zp = ZProjector(newImp)
#zp.setMethod( ZProjector.MAX_METHOD )
#zp.doProjection()
#newMIPImp = zp.getProjection()
#newMIPImp.show()
#ipMIP = newMIPImp.getProcessor()
#EDM().toEDM( ipMIP.convertToByte(True) )