def get_spacing_tif(file_path, axis):
    """Read the voxel spacing along the given axis ('X', 'Y' or 'Z') from a TIFF file."""
    with tif.TiffFile(str(file_path)) as tifr:
        try:
            if axis == 'X' or axis == 'Y':
                # (X/Y)Resolution tags are stored as a rational (numerator, denominator);
                # the denominator is used as the spacing here
                spacing = tifr.pages[0].tags[axis + 'Resolution'].value[1]
            elif axis == 'Z':
                spacing = tifr.imagej_metadata['spacing']
            else:
                raise NotImplementedError(
                    'Getting spacing along {} is not yet implemented'.format(axis))
        except (KeyError, TypeError):
            # todo: Find a way to extract spacing from BigStitcher stitched images
            spacing = 1
            warnings.warn(
                'Could not read the spacing. Spacing has been set to 1. Fix manually')
    return spacing
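# Usage sketch (not part of the original module): how get_spacing_tif could be
# combined into a (z, y, x) spacing tuple. The file name below is hypothetical and
# assumes an ImageJ-saved stack with resolution tags and 'spacing' metadata present.
def _example_read_spacing(stack_path="example_stack.tif"):
    """Illustrative only: collect the voxel spacing along all three axes."""
    spacing_zyx = tuple(get_spacing_tif(stack_path, axis) for axis in ("Z", "Y", "X"))
    print("Voxel spacing (z, y, x):", spacing_zyx)
    return spacing_zyx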
def Xcorrelation(ventral_file, dorsal_file, z_layer, sf_x, sf_y, sf_z):
    '''
    ventral_file and dorsal_file must have the same dimensions in x and y.
    z_layer is the layer of the ventral file most similar to the first layer of the dorsal file.
    sf_x, sf_y, and sf_z are the initial guesses of the shift.
    The first image of the dorsal file is used as the reference to compare against.
    '''
    # Magic-number zone. The following numbers are the search ranges.
    sr_x = 60
    sr_y = 60
    sr_z = 30
    with TFF.TiffFile(ventral_file) as v_tif:
        ventral_tif = v_tif.pages[z_layer]
        ventral_image = ventral_tif.asarray()
        ventral_stack = v_tif.pages[z_layer - round(sr_z / 2):z_layer + round(sr_z / 2)]
        #ventral_stack = ventral_stack[0].asarray()
        dorsal_first_image = TFF.imread(dorsal_file, key=0)
        repeat = 0
        current_layer = z_layer
        next_layer = 0
        while current_layer != next_layer:
            current_layer = next_layer
            # Calculate the correlation in the x,y plane
            [ventral_image, dorsal_image] = matchImageSize(ventral_image, dorsal_first_image)
            print("Images are matched")
            corr_matrix = narrorwXcorrelation(ventral_image, dorsal_image, sf_x, sf_y, sr_x, sr_y)
            max_ind = corr_matrix.argmax()
            shift_yx = np.unravel_index(max_ind, corr_matrix.shape)
            print([repeat, shift_yx, corr_matrix[shift_yx[0], shift_yx[1]]])
            plt.subplot().imshow(corr_matrix)
            plt.show()
            dy = round(sr_y / 2) - shift_yx[0]
            dx = round(sr_x / 2) - shift_yx[1]
            # Shift x,y according to what was found above, then find the best correlation along z
            n = 0
            corr_array = []
            layer_array = []
            for ventral_layer in ventral_stack:
                ventral_image = ventral_layer.asarray()
                [ventral_image, dorsal_image] = matchImageSize(ventral_image, dorsal_first_image)
                [ventral_image, dorsal_image] = shiftImage(ventral_image, dorsal_image, dx, dy)
                corr = pearsonCorrelation(ventral_image, dorsal_image)
                corr_array.append(corr)
                layer_array.append(z_layer - round(sr_z / 2) + n)
                n = n + 1
            corr_array = np.asarray(corr_array)
            layer_array = np.asarray(layer_array)
            max_ind = np.argmax(corr_array)
            next_layer = layer_array[max_ind]
            print([repeat, layer_array[max_ind], corr_array[max_ind]])
            ventral_image = ventral_stack[max_ind].asarray()
            repeat = repeat + 1
        [shift_ventral, shift_dorsal] = shiftImage(ventral_image, dorsal_image, dy, dx)
        TFF.imwrite("ventral_shift.tif", shift_ventral)
        TFF.imwrite("dorsal_shift.tif", shift_dorsal)
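# Usage sketch (not part of the original module): Xcorrelation writes
# "ventral_shift.tif" and "dorsal_shift.tif" to the working directory. The file
# names and the guesses below are hypothetical; z_layer should be the ventral
# layer expected to match the first dorsal layer.
def _example_xcorrelation():
    """Illustrative only: register a ventral and a dorsal stack."""
    Xcorrelation("ventral_stack.tif", "dorsal_stack.tif",
                 z_layer=120,              # initial guess for the matching ventral layer
                 sf_x=0, sf_y=0, sf_z=0)   # initial shift guesses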
def stitchWellsInRAM(wellDict, inputDir, outputDir, resizeTo=None):
    tStart = time.time()
    for well in wellDict.keys():
        t0 = time.time()
        print("Starting on wellID:", well)
        ncols = wellDict[well]['ncols']
        nrows = wellDict[well]['nrows']
        nChans, nTimepoints = wellDict[well]['nChannels'], wellDict[well]['nTimepoints']
        nSlices = wellDict[well]['nSlices']
        frame_interval, time_unit = wellDict[well]['frame_interval'], wellDict[well]['timeunit']
        pixelDepthDict = {8: "uint8", 16: "uint16", 32: "float32"}
        pixType = pixelDepthDict[wellDict[well]['pixelDepth']]
        xpix, ypix, pixel_resolution = (wellDict[well]['xpix'], wellDict[well]['ypix'],
                                        wellDict[well]['pixel_resolution'])
        outWidth = xpix * ncols
        outHeight = ypix * nrows
        outArray = np.empty((nTimepoints, nSlices, nChans, outHeight, outWidth), dtype=pixType)
        print("well array shape:", outArray.shape)
        for r in range(nrows):
            for c in range(ncols):
                t1 = time.time()
                startX = (ncols - c - 1) * xpix
                startY = r * ypix
                loadme = os.path.join(inputDir, wellDict[well]['positions'][(r, c)][0])
                print("Working on file: ", str(loadme))
                with tiffile.TiffFile(loadme) as tif:
                    inArray = tif.asarray()
                print("File loaded as array, shape: ", inArray.shape,
                      "loadtime: ", round(time.time() - t1), " s")
                try:
                    outArray[:, :, :, startY:(startY + ypix), startX:(startX + xpix)] = inArray
                except ValueError:
                    inArray = np.reshape(inArray, (nTimepoints, nSlices, nChans, xpix, ypix))
                    print("Input array reshaped to: ", inArray.shape)
                    outArray[:, :, :, startY:(startY + ypix), startX:(startX + xpix)] = inArray
                print("Input array appended to outArray. Elapsed time for well: ",
                      round(time.time() - t0))
        saveme = os.path.join(outputDir, str(well) + "_stitched.tif")
        if resizeTo is not None:
            print("Resizing outArray...")
            old_resolution = pixel_resolution[0] / float(pixel_resolution[1])
            print("Original pixel resolution was: %s / %s = %s px/resolution unit"
                  % (pixel_resolution[0], pixel_resolution[1], old_resolution))
            new_resolution = old_resolution * float(resizeTo)
            rational_new_resolution = Fraction(new_resolution).limit_denominator()
            pixel_resolution = (rational_new_resolution.numerator,
                                rational_new_resolution.denominator)
            print("New pixel resolution is: %s / %s = %s px/resolution unit"
                  % (pixel_resolution[0], pixel_resolution[1], new_resolution))
            t = time.time()
            outArray = bin_ndarray(
                outArray,
                (nTimepoints, nSlices, nChans, outHeight * resizeTo, outWidth * resizeTo)
            ).astype("uint16")
            print("Done in %.2f s with resizing output!" % (round(time.time() - t)))
        bigTiffFlag = outArray.size * outArray.dtype.itemsize > 2000 * 2**20
        print("bigTiffFlag set to:", bigTiffFlag, "Saving output...(may take a while)")
        metadata = {"zStack": bool(1 - nSlices),
                    "unit": "um",
                    "tunit": time_unit,
                    "finterval": frame_interval}
        imageJresolution = (pixel_resolution[0] / pixel_resolution[1]) / 10000.0
        save_data = {"bigtiff": bigTiffFlag,
                     "imagej": True,
                     "resolution": (imageJresolution, imageJresolution, None),
                     "metadata": metadata}
        tiffile.imsave(saveme, outArray, **save_data)
        print("Done with wellID: ", well, "in ", round(time.time() - t0, 2), " s")
    print("All done, it took ", round(time.time() - tStart, 2), " s in total!")
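# Minimal sketch (not from the original code) of the resolution bookkeeping used
# above when resizeTo is given: TIFF stores resolution as a rational
# (numerator, denominator) in pixels per resolution unit, so downscaling the image
# by resizeTo scales that rational by the same factor. The numbers below are made up.
def _example_rescale_resolution(pixel_resolution=(4389769, 800000), resizeTo=0.5):
    """Illustrative only: rescale a rational pixel resolution by a resize factor."""
    from fractions import Fraction
    old_resolution = pixel_resolution[0] / float(pixel_resolution[1])  # px per unit
    new_resolution = Fraction(old_resolution * resizeTo).limit_denominator()
    return (new_resolution.numerator, new_resolution.denominator)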
def stitchWellsOnDisk(wellDict, inputDir, outputDir, resizeTo=None):
    """
    Avoids loading constituent files into RAM. Stitches and rescales frame by frame instead.

    Args:
        wellDict: (dict) wellID:filename
        inputDir: str or os.path
        outputDir: str or os.path
        resizeTo: factor to resize to, must be a factor of 2

    Returns:

    """
    tStart = time.time()
    for well in wellDict.keys():
        t0 = time.time()
        print("Starting on wellID:", well)
        ncols = wellDict[well]['ncols']
        nrows = wellDict[well]['nrows']
        nChans = wellDict[well]['nChannels']
        nTimepoints = wellDict[well]['nTimepoints']
        nSlices = wellDict[well]['nSlices']
        frame_interval = wellDict[well]['frame_interval']
        time_unit = wellDict[well]['timeunit']
        pixelDepthDict = {8: "uint8", 16: "uint16", 32: "float32"}
        pixType = pixelDepthDict[wellDict[well]['pixelDepth']]
        xpix = wellDict[well]['xpix']
        ypix = wellDict[well]['ypix']
        pixel_resolution = wellDict[well]['pixel_resolution']
        outWidth = xpix * ncols
        outHeight = ypix * nrows

        if resizeTo is not None:
            print("Resizing outArray...")
            old_resolution = pixel_resolution[0] / float(pixel_resolution[1])
            print("old resolution; {}, rational: {}".format(old_resolution, pixel_resolution))
            new_resolution = old_resolution * resizeTo
            rational_new_resolution = Fraction(new_resolution).limit_denominator()
            pixel_resolution = (rational_new_resolution.numerator,
                                rational_new_resolution.denominator)
            print("new resolution; {}, rational: {}".format(new_resolution, pixel_resolution))
            outArray = np.empty((nTimepoints, nSlices, nChans,
                                 outHeight * resizeTo, outWidth * resizeTo),
                                dtype=pixType)
        else:
            outArray = np.empty((nTimepoints, nSlices, nChans, outHeight, outWidth),
                                dtype=pixType)

        full_frame_buffer = np.empty((1, nSlices, nChans, outHeight, outWidth), dtype=pixType)
        print("output well array shape: {}, full_frame_array shape; {}".format(
            outArray.shape, full_frame_buffer.shape))

        for frame in range(nTimepoints):
            t1 = time.time()
            print("Working on frame: {}".format(frame))
            for row in range(nrows):
                for col in range(ncols):
                    # Get name of file at this (row, col) position
                    loadme = os.path.join(inputDir,
                                          wellDict[well]['positions'][(row, col)][0])
                    # X/Y coordinates for insertion of subframe
                    startX = (ncols - col - 1) * xpix
                    startY = row * ypix
                    # is_ome is set to False so that all .asarray calls return the same shape
                    with tiffile.TiffFile(loadme, is_ome=False) as tif:
                        # Only get the current frame, with channels and slices, as an array
                        if nSlices == 1:
                            slice_to_load = slice(frame * nChans, (frame + 1) * nChans)
                        else:
                            slice_to_load = slice(frame * (nChans + nSlices),
                                                  (frame + 1) * (nChans + nSlices))
                        inArray = tif.asarray(key=slice_to_load)
                        try:
                            full_frame_buffer[:, :, :, startY:(startY + ypix),
                                              startX:(startX + xpix)] = inArray
                        except ValueError:
                            inArray = np.reshape(inArray, (1, nSlices, nChans, xpix, ypix))
                            #print("Input array reshaped to: {}".format(inArray.shape))
                            full_frame_buffer[:, :, :, startY:(startY + ypix),
                                              startX:(startX + xpix)] = inArray
                        print("Frame: {}, Row: {}, Col: {} loaded from file: {} array shape: {}"
                              .format(frame, row, col, tif.filename, inArray.shape))
            if resizeTo is not None:
                resized_frame_buffer = bin_ndarray(
                    full_frame_buffer,
                    (1, nSlices, nChans, outHeight * resizeTo, outWidth * resizeTo))
                print("full_frame_buffer resized to {}".format(resized_frame_buffer.shape))
                outArray[frame, :, :, :, :] = resized_frame_buffer
            else:
                outArray[frame, :, :, :, :] = full_frame_buffer
            print("Frame done in {} s. Elapsed time for well: {} min, since start: {} min"
                  .format(round((time.time() - t1), 1),
                          round((time.time() - t0) / 60),
                          round((time.time() - tStart) / 60)))

        saveme = os.path.join(outputDir, str(well) + "_stitched.tif")
        bigTiffFlag = outArray.size * outArray.dtype.itemsize > 2000 * 2**20
        print("bigTiffFlag set to:", bigTiffFlag, "Saving output...(may take a while)")
        metadata = {"zStack": bool(1 - nSlices),
                    "unit": "um",
                    "tunit": time_unit,
                    "finterval": frame_interval}
        imageJresolution = (pixel_resolution[0] / pixel_resolution[1]) / 10000.0
        save_data = {"bigtiff": bigTiffFlag,
                     "imagej": True,
                     "resolution": (imageJresolution, imageJresolution, None),
                     "metadata": metadata}
        tiffile.imsave(saveme, outArray, **save_data)
        print("Done with wellID: ", well, "in ", round(time.time() - t0, 2), " s")
    print("All done, it took ", round(time.time() - tStart, 2), " s in total!")
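# Usage sketch (not part of the original module): the intended pipeline is to build
# the well dictionary with filenamesToDict (defined below) and hand it to one of the
# stitchers. The directories and resize factor below are hypothetical;
# stitchWellsOnDisk trades speed for a much smaller memory footprint than
# stitchWellsInRAM.
def _example_stitch_pipeline(inputDir="raw_wells/", outputDir="stitched/"):
    """Illustrative only: stitch every well found in inputDir, downscaled by 0.5."""
    wellDict = filenamesToDict(inputDir)
    stitchWellsOnDisk(wellDict, inputDir, outputDir, resizeTo=0.5)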
def filenamesToDict(indir, wellNameDict=None):
    """
    Transforms the (ome).tif stack files in a directory into a dictionary.

    Filenames must conform to the following general pattern:
    ..._wellID-xxx_threeDigitRowNumber_threeDigitColNumber...

    :param indir: path to directory with files from a multiwell mosaic experiment
    :param wellNameDict: dictionary of names to give the wells, keyed by wellID number

    property_dict =
        'nrows': (int) No. of rows in well
        'ncols': (int) No. of columns in well
        'nChannels': (int) No. of channels in image
        'nSlices': (int) No. of z-slices in image
        'nTimepoints': (int) No. of timepoints in the sequence
        'xpix': (int) No. of pixels in X-dimension of images
        'ypix': (int) No. of pixels in Y-dimension of images
        'frame_interval': (float) No. of time units between frames
        'timeunit': (str) time unit
        'pixel_resolution': pixel resolution, stored as a rational from the tif tag
        'resoloution_unit': (str) spatial resolution unit
        'pixelDepth': (int) pixel depth of image
        'positions': position_dict, gives file(s) at position (row, col)
        'files': (list) names of files in well
        'isConcat': (bool) True if the sequence is split into multiple files
        'OME-XML': OME-XML header of the first file

    position_dict = {(row, col): filename(s)}

    :return: Dictionary with wellID:property_dict
    """
    # Ignore non-.tif files in indir
    filenames = [fname for fname in os.listdir(indir) if ".tif" in fname]
    filenames.sort()
    wellDict = {}
    isConcat = False

    # Regexes used to flexibly identify wells, rows, columns, and split files from filenames.
    # Matches any number of digits preceded by "MMStack_" and followed by "-"
    well_regex = re.compile(r"(?<=MMStack_)\d+(?=-)")
    # Matches three digits preceded by three digits and "_"
    column_regex = re.compile(r"(?<=\d{3}_)\d{3}")
    # Matches three digits preceded by "_" and followed by "_"
    row_regex = re.compile(r"(?<=_)\d{3}(?=_)")
    # Matches a digit preceded by "threeDigits_threeDigits_" and followed by ".ome"
    concat_regex = re.compile(r"(?<=\d{3}_\d{3}_)\d(?=\.ome)")

    first_file = os.path.join(indir, filenames[0])
    print("Opening the first file: \"%s\" to read its MicroManager metadata..." % first_file)
    with tiffile.TiffFile(first_file) as tif:
        frames = len(tif.pages)
        page = tif.pages[0]
        print("There are %s frames and their shape is %s" % (frames, page.shape))
        pixres = page.tags['x_resolution'].value  # Assumes equal x/y resolution
        resoloution_unit = page.tags['resolution_unit'].value
        resoloution_unit = {1: 'none', 2: 'inch', 3: 'centimeter'}[resoloution_unit]
        pixelDepth = page.tags['bits_per_sample'].value
        omexml = page.tags['image_description'].value
        try:
            meta_data = tif.micromanager_metadata
            frame_interval = meta_data['summary']['WaitInterval']
            ypix, xpix = meta_data['summary']['Height'], meta_data['summary']['Width']
            nChannels = meta_data['summary']['Channels']
            nSlices = meta_data['summary']['Slices']
            nTimepoints = frames / (nChannels * nSlices)
        except (KeyError, TypeError):
            warnings.warn("Metadata read error!")
            print("Something is wrong with the MicroManager metadata, replacing all values"
                  " with defaults. This means that SCALING IS NOT CORRECT and the image stack"
                  " is flattened over channels and slices!")
            frames = page.shape[0]
            meta_data = None
            frame_interval = 1
            ypix, xpix = page.shape[1], page.shape[2]
            nChannels = 1
            nSlices = 1
            nTimepoints = frames

    for f in filenames:
        # Extract positioning information from the filename with regexes
        wellID = int(well_regex.search(f).group())
        if wellNameDict is not None:
            wellID = wellNameDict[wellID]
        rowNumber = int(row_regex.search(f).group())
        columnNumber = int(column_regex.search(f).group())
        concat = concat_regex.search(f)
        if concat is not None:
            isConcat = True
        # If there is no key for wellID in wellDict -> create a dict of properties
        if wellDict.get(wellID) is None:
            wellDict[wellID] = {
                'nrows': 1,
                'ncols': 1,
                'nChannels': int(nChannels),
                'nSlices': int(nSlices),
                'xpix': int(xpix),
                'ypix': int(ypix),
                'nTimepoints': int(nTimepoints),
                'frame_interval': (frame_interval / 1000.0),
                'timeunit': 's',
                'pixel_resolution': pixres,  # resolution stored as rational in tif tag
                'resoloution_unit': resoloution_unit,
                'pixelDepth': pixelDepth,
                'positions': {},
                'files': [],
                'isConcat': isConcat,
                'OME-XML': omexml
            }
        # Populate properties
        wellDict[wellID]['nrows'] = max(rowNumber + 1, wellDict[wellID]['nrows'])
        wellDict[wellID]['ncols'] = max(columnNumber + 1, wellDict[wellID]['ncols'])
        # List of filenames for the well
        wellDict[wellID]['files'].append(f)
        # Dict with (row, column): (list) filename(s)
        if wellDict[wellID]['positions'].get((rowNumber, columnNumber)) is None:
            wellDict[wellID]['positions'][(rowNumber, columnNumber)] = [f]
        else:
            wellDict[wellID]['positions'][(rowNumber, columnNumber)].append(f)
            wellDict[wellID]['positions'][(rowNumber, columnNumber)].sort()
        wellDict[wellID]['isConcat'] = isConcat

    print("wellDict created, proceed to choosing output directory and renaming wells.")
    return wellDict
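# Minimal sketch (not from the original code) of how the filename regexes above pick
# a file apart. The filename is hypothetical but follows the documented
# ..._MMStack_<wellID>-..._<row>_<col>... pattern.
def _example_parse_filename(fname="exp1_MMStack_3-Pos_002_005.ome.tif"):
    """Illustrative only: extract well, row and column numbers from a filename."""
    import re
    well = int(re.search(r"(?<=MMStack_)\d+(?=-)", fname).group())  # -> 3
    row = int(re.search(r"(?<=_)\d{3}(?=_)", fname).group())        # -> 2
    col = int(re.search(r"(?<=\d{3}_)\d{3}", fname).group())        # -> 5
    return well, row, col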
def do_it_all(indir, fname, outdir, maxFrameToAnalyze=200, px_resolution=5.466,
              time_resolution=5.0, r_max=300, r_step=1, n_sigma=5, intervalWidth=10,
              saveFigFlag=False):
    """
    :param indir: input directory
    :param fname: name of input file
    :param outdir: where to save graphs and output data
    :param maxFrameToAnalyze: (int) last frame to analyze
    :param px_resolution: (float/int) pixel resolution of input images in um/pixel
    :param time_resolution: (float) time resolution of input images in frames/hour
    :param r_max: (int) maximum distance (in pixels) to calculate angles for
    :param r_step: (int) increment size of distance r (in pixels) during angle calculations
    :param n_sigma: (float) significance level that determines the correlation length
    :param intervalWidth: (int) width in frames to integrate angle data over, i.e. temporal integration
    :param saveFigFlag: (bool) if True, save figures to outdir instead of showing them
    :return: (tuple) output data for further processing
    """
    # PIV parameters
    piv_params = dict(window_size=32, overlap=16, dt=1, search_area_size=36,
                      sig2noise_method="peak2peak")
    px_scale = px_resolution * piv_params["overlap"]
    piv_scaler = px_resolution * time_resolution  # um/px * frames/hour = um*frames/(px*hours)

    print("Working on: %s" % fname)
    t0 = time.time()
    with tiffile.TiffFile(os.path.join(indir, fname)) as tif:
        arr = tif.asarray()
        arr = arr[0:maxFrameToAnalyze]
        arr = arr.astype(np.int32)  # openPIV only works with 32-bit arrays
    t1 = time.time()
    print("It took %.2f s to load %s, and it has shape: %s, now processing PIV..."
          % (t1 - t0, fname, arr.shape))

    arr_u, arr_v, arr_x, arr_y = openPIV_array_processor(arr, stopFrame=arr.shape[0],
                                                         **piv_params)
    t2 = time.time()  # timed after the PIV call so the elapsed time includes the processing
    print("It took %.2f s to process PIV, and output arrays have the shape: %s"
          % (t2 - t1, arr_u.shape))

    t3 = time.time()
    inst_order_params, align_idxs, speeds, timepoints = [], [], [], []
    for frame in range(arr_u.shape[0]):
        iop = instantaneous_order_parameter(arr_u[frame], arr_v[frame])
        inst_order_params.append(iop)
        ai = alignment_index(arr_u[frame], arr_v[frame], alsoReturnMagnitudes=True)
        aidx = np.nanmean(ai[0])
        align_idxs.append(aidx)
        speed = np.nanmean(ai[1]) * piv_scaler  # px/frame * um*frames/(px*hours) -> um/hour
        speeds.append(speed)
        timepoint = frame * (1 / float(time_resolution))
        timepoints.append(timepoint)

    if saveFigFlag:
        plt.plot(timepoints, speeds, 'r', label=fname[:6])
        plt.xlabel("Time (h)")
        plt.ylabel("Mean speed (um/h)")
        plt.title("Velocity vector magnitudes (speed)")
        plt.legend()
        savename = outdir + fname[:-4] + "_speed.pdf"
        plt.savefig(savename, bbox_inches='tight', pad_inches=0)
        plt.close()

        plt.plot(timepoints, align_idxs, 'b', label=fname[:6])
        plt.xlabel("Time (h)")
        plt.ylabel("Align index")
        plt.title("Alignment index")
        plt.legend()
        savename = outdir + fname[:-4] + "_alignIndex.pdf"
        plt.savefig(savename, bbox_inches='tight', pad_inches=0)
        plt.close()

        plt.plot(timepoints, inst_order_params, 'g', label=fname[:6])
        plt.xlabel("Time (h)")
        plt.ylabel(r"$\psi$")
        plt.title(r"Instantaneous order parameter ($\psi$)")
        plt.legend()
        savename = outdir + fname[:-4] + "_iop.pdf"
        plt.savefig(savename, bbox_inches='tight', pad_inches=0)
        plt.close()

    t4 = time.time()
    print("It took %.2f s to do alignment indexes, order parameters and speeds" % (t4 - t3))

    metaResults = {}
    # Temporal integration
    for interval in range(0, arr_u.shape[0], intervalWidth):
        results = {}
        tmp_u = arr_u[interval:interval + intervalWidth]
        tmp_v = arr_v[interval:interval + intervalWidth]
        for t in range(tmp_u.shape[0]):
            for diagonal in range(0, min(tmp_u.shape[1], tmp_u.shape[2])):
                # Follow the diagonal
                results = get_all_angles(tmp_u[t], tmp_v[t], (diagonal, diagonal), results,
                                         r_max=r_max, r_step=r_step, r_min=1)
        metaResults[interval] = dict(results)

    lastrs = [["n_sigma", "radius_px", "max_sign_r_um"]]
    lcorrs = {}  # interval: correlation length in um
    for interv in sorted(metaResults.keys()):
        r = []
        avg_angle = []
        results = metaResults[interv]
        for radius, angles_list in results.items():
            if len(angles_list) == 0:
                print("Empty value at interval %i and r=%i" % (interv, radius))
                break
            sanitized_angles = [a for a in angles_list if a <= 1.0]  # Sometimes openPIV outputs weird values
            if len(sanitized_angles) != len(angles_list):
                print("Bad angles at interval %i and radius %i, number ok: %i, not ok: %i"
                      % (interv, radius, len(sanitized_angles),
                         len(angles_list) - len(sanitized_angles)))
            mean_angle = np.nanmean(sanitized_angles)
            mean_angle_degrees = math.acos(mean_angle) * (180 / math.pi)
            sd_angles = np.nanstd(sanitized_angles)
            sd_angles_degrees = math.acos(sd_angles) * (180 / math.pi)
            SEM_angles = sd_angles_degrees / math.sqrt(len(sanitized_angles))
            r.append(radius * px_scale)
            avg_angle.append(mean_angle_degrees)
            if (mean_angle_degrees + n_sigma * SEM_angles >= 90) and (interv not in lcorrs) \
                    and (len(r) != 0):
                # TODO
                interval_center = interval + intervalWidth / 2 * time_resolution
                lcorrs[interv] = r[-1]
                print("%i-sigma reached at r=%i on interval %i, last significant distance was %.2f um"
                      % (n_sigma, radius, interv, r[-1]))
        label = ("interval: " + str(interv / time_resolution) + " - "
                 + str((interv + intervalWidth) / time_resolution) + " h, Lcorr = "
                 + str(int(lcorrs[interv])) + " um")
        plt.plot(r, avg_angle, label=label)

    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, mode="expand")
    plt.title("Average angle between velocity vectors " + fname[:6])
    plt.xlabel("Distance in um")
    plt.ylabel("Mean angle (degrees)")
    if saveFigFlag:
        savename = outdir + fname[:-4] + "_cvv.pdf"
        plt.savefig(savename, bbox_inches='tight')
    else:
        plt.show()
    print("All done in %.2f s" % (time.time() - t0))
    plt.close()
    return (inst_order_params, align_idxs, speeds, timepoints, lcorrs, metaResults)
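# Usage sketch (not part of the original module): run the full PIV analysis on a
# single stitched time-lapse stack. Paths, file name, and parameters below are
# hypothetical; px_resolution is in um/pixel and time_resolution in frames/hour,
# matching the defaults documented in do_it_all.
def _example_do_it_all(indir="stitched/", outdir="analysis/"):
    """Illustrative only: analyze one file and return its correlation lengths."""
    fname = "well_A1_stitched.tif"
    iops, align_idxs, speeds, timepoints, lcorrs, metaResults = do_it_all(
        indir, fname, outdir,
        maxFrameToAnalyze=100,
        saveFigFlag=True)
    return lcorrs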