def __init__(self, fname, size=256, scale=1, batch_size=16):
    """`scale` should be an even number for faster processing."""
    self.fname = fname
    self.img_src = ND2_Reader(self.fname)
    self.cols = self.img_src.width
    self.rows = self.img_src.height
    self.batch_size = batch_size
    self.scale = scale
    self.size = size * self.scale
    self.probability_threshold = 0.5
    # Tile origins along each axis; shift the last tile back so it
    # stays inside the image instead of running past the border.
    idx_r = np.arange(0, self.rows, self.size)
    if idx_r[-1] + self.size > self.rows:
        idx_r[-1] = self.rows - self.size
    idx_c = np.arange(0, self.cols, self.size)
    if idx_c[-1] + self.size > self.cols:
        idx_c[-1] = self.cols - self.size
    rr, cc = np.meshgrid(idx_r, idx_c)
    idx_r = rr.ravel()
    idx_c = cc.ravel()
    self.idxs = np.vstack((idx_r, idx_c)).T
    print("Full image size", self.rows, self.cols)
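# Sketch (assumption, not from the original): how the tile-origin grid above
# behaves for a hypothetical 1000 x 1200 image with 256-px tiles. The last
# origin on each axis is clamped so every tile fits inside the image.
import numpy as np

rows, cols, size = 1000, 1200, 256
idx_r = np.arange(0, rows, size)        # [0, 256, 512, 768]
if idx_r[-1] + size > rows:
    idx_r[-1] = rows - size             # 768 -> 744, so 744 + 256 == 1000
idx_c = np.arange(0, cols, size)        # [0, 256, 512, 768, 1024]
if idx_c[-1] + size > cols:
    idx_c[-1] = cols - size             # 1024 -> 944
rr, cc = np.meshgrid(idx_r, idx_c)
idxs = np.vstack((rr.ravel(), cc.ravel())).T  # one (row, col) origin per tile
print(idxs.shape)                             # (20, 2)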
def extract_nd2_metadata_sdk(f, interpolate=True, progress=None):
    """Interpolation fills in timestamps linearly for each well;
    x, y, z positions are copied from the first time point.
    """
    with ND2_Reader(f) as nd2:
        ts = range(nd2.sizes['t'])
        ms = range(nd2.sizes['m'])
        if progress is None:
            progress = lambda x: x
        arr = []
        for t, m in progress(list(product(ts, ms))):
            # Full metadata is read for every position at t == 0 and for the
            # first/last positions at every t; the rest is filled in below.
            boundaries = [0, nd2.sizes['m'] - 1]
            skip = m not in boundaries and t > 0
            if interpolate and skip:
                metadata = {}
            else:
                metadata = get_metadata_at_coords(nd2, t=t, m=m)
            metadata['t'] = t
            metadata['m'] = m
            metadata['file'] = f
            arr += [metadata]

    df_info = pd.DataFrame(arr)
    if not interpolate:
        return df_info
    return (df_info
            .sort_values(['m', 't'])
            .assign(x_um=lambda x: x['x_um'].fillna(method='ffill'))
            .assign(y_um=lambda x: x['y_um'].fillna(method='ffill'))
            .assign(z_um=lambda x: x['z_um'].fillna(method='ffill'))
            .sort_values(['t', 'm'])
            .assign(t_ms=lambda x: x['t_ms'].interpolate()))
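# Sketch (assumption, not part of the original): a toy frame showing why the
# function sorts by ['m', 't'] before forward-filling positions, then by
# ['t', 'm'] before interpolating timestamps. .ffill() is equivalent to the
# fillna(method='ffill') calls above.
import numpy as np
import pandas as pd

df = pd.DataFrame({
    't':    [0, 0, 0, 1, 1, 1],
    'm':    [0, 1, 2, 0, 1, 2],
    'x_um': [10.0, 20.0, 30.0, 10.0, np.nan, 30.0],
    't_ms': [0.0, 5.0, 10.0, 60.0, np.nan, 70.0],
})
out = (df.sort_values(['m', 't'])
         .assign(x_um=lambda x: x['x_um'].ffill())        # per-well position copied forward
         .sort_values(['t', 'm'])
         .assign(t_ms=lambda x: x['t_ms'].interpolate())) # timestamps filled linearly
print(out)  # (t=1, m=1) gets x_um == 20.0 and t_ms == 65.0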
def deskew_file(path, tmat=DEFAULT_TMAT, mip=True):
    """Deskews an nd2 file at `path` using the transformation matrix `tmat`.

    Creates a new folder (named after the file) of multichannel deskewed
    files, one per timepoint.

    Args:
        path (str): the nd2 file to process
        tmat (np.ndarray): the transformation matrix
        mip (bool): whether to also write a maximum-intensity-projection file
    """
    tmat = np.array(tmat)
    dirname, fname = os.path.split(path)
    fname, ext = os.path.splitext(fname)
    outdir = os.path.join(dirname, fname + "_deskewed")
    os.makedirs(outdir, exist_ok=True)
    with ND2_Reader(path) as frames:
        frames.bundle_axes = "czyx"
        for frame in frames:
            print(f"processing frame: {frame.frame_no + 1:4} of {len(frames)}")
            # apply the deskew to each channel; the .T rotates it 90 degrees
            out = np.stack([affineGPU(chan, tmat).T for chan in frame])
            dst = os.path.join(outdir, f"{fname}_{frame.frame_no:04}.tif")
            # metadata for voxel sizes could be added here
            imsave(dst, np.transpose(out, (1, 0, 2, 3)), imagej=True)
            if mip:
                mipdir = os.path.join(outdir, "MIPs")
                os.makedirs(mipdir, exist_ok=True)
                dst = os.path.join(mipdir, f"{fname}_{frame.frame_no:04}_mip.tif")
                imsave(dst, out.max(1), imagej=True)
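# Sketch (assumption): invoking deskew_file with an explicit shear matrix.
# The 4x4 homogeneous form, the 31.8-degree angle, and the path are all
# illustrative values, not taken from the original.
import numpy as np

angle = np.deg2rad(31.8)        # hypothetical light-sheet angle
shear = np.eye(4)
shear[2, 0] = np.cos(angle)     # shear x with z (axis order assumed zyx)
deskew_file("/data/stack_timelapse.nd2", tmat=shear, mip=True)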
def export_nd2_sdk_file_table(f_nd2, df_files):
    df = df_files.drop_duplicates('file_')
    with ND2_Reader(f_nd2) as nd2:
        nd2.iter_axes = 'm'
        nd2.bundle_axes = ['t', 'c', 'y', 'x']
        # one (t, c, y, x) stack per position m, written to the
        # filename listed for that position in df_files
        for m, data in tqdn(enumerate(nd2)):
            f_out = df.query('m == @m')['file_'].iloc[0]
            save(f_out, data)
def parse_nd2_file(nd2_filepath):
    """Parse the images and metadata necessary for stitching from an nd2 file."""
    with ND2_Reader(nd2_filepath) as images:
        num_fov = images.sizes['m']
        num_channels = images.sizes['c']
        num_rows = images.sizes['y']
        num_columns = images.sizes['x']
        fields_of_view = list(range(num_fov))
        channels = [
            images.metadata['plane_' + str(num)]['name']
            for num in range(num_channels)
        ]
        microns_per_pixel = images.metadata['calibration_um']
        try:
            images.iter_axes = 'mc'
            images.bundle_axes = 'zyx'
        except Exception:
            # single field of view: no 'm' axis to iterate over
            images.iter_axes = 'c'
            images.bundle_axes = 'zyx'
        aggregated_images = []
        coordinate_pairs = []
        for z_stack in images:
            # max-project each z-stack and record its stage position
            aggregated_image = np.max(z_stack, axis=0)
            aggregated_images.append(aggregated_image)
            coordinate_pair = (z_stack.metadata['y_um'],
                               z_stack.metadata['x_um'])
            coordinate_pairs.append(coordinate_pair)
        aggregated_images = np.reshape(
            aggregated_images,
            (num_fov, num_channels, num_rows, num_columns))
        # average the per-channel stage coordinates for each field of view
        coordinate_pairs = np.average(
            np.reshape(coordinate_pairs, (num_fov, num_channels, 2)), axis=1)
        print("Coordinate pairs is \n" + str(coordinate_pairs))
        print("Shape is " + str(aggregated_images.shape))
        data = {
            "aggregated_images": aggregated_images,
            "coordinate_pairs": coordinate_pairs,
            "fields_of_view": fields_of_view,
            "channels": channels,
            "microns_per_pixel": microns_per_pixel,
        }
        return data
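# Sketch (assumption): consuming the dict returned above; the file path is a
# placeholder.
data = parse_nd2_file("/data/tiled_scan.nd2")
print(len(data["fields_of_view"]), "tiles,", data["channels"])
print("pixel size:", data["microns_per_pixel"], "um")
# aggregated_images has shape (fov, channel, y, x); coordinate_pairs is (fov, 2)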
def main(name, fitter, roi_locations, shared, q):
    ND2 = ND2_Reader(name)
    frames = ND2
    metadata = ND2.metadata
    # frames = frames[0:2]  # uncomment to test on the first two frames only

    local_result = fitter.main(frames, metadata, roi_locations)

    # each fit result occupies nine consecutive slots in the shared array
    for result_index, result in enumerate(local_result):
        shared[9 * result_index:9 * (result_index + 1)] = result[:]

    print("Done fitting")
    q.put(1)
def mt_main(name, fitter, frames_split, roi_locations, shared, q):
    nd2 = ND2_Reader(name)
    frames = nd2
    metadata = nd2.metadata
    metadata['sequence_count'] = len(frames_split)
    frames = frames[frames_split]

    local_result = fitter.main(frames, metadata, roi_locations,
                               q=q, start_frame=frames_split[0])

    # each fit result occupies nine consecutive slots in the shared array
    for result_index, result in enumerate(local_result):
        shared[9 * result_index:9 * (result_index + 1)] = result[:]

    print("Done fitting")
def Analyze(scan, protein, nd2_file, Roi_Data_file, status_file, time_file, n):
    try:
        data = pd.read_csv(Roi_Data_file)
        scan = data.shape[1] - 2
        scan += 1
    except Exception:
        raise OSError('Roi_Data_file cannot be read.')

    pic = ND2_Reader(nd2_file + '%03d' % scan + '.nd2')
    pic.iter_axes = 'm'
    pic.default_coords['c'] = protein
    newdata = []
    newtime = []
    for i in pic:
        # mean intensity of each quadrant of the 1024x1024 frame
        roi_1 = np.mean(i[0:512, 0:512])
        roi_2 = np.mean(i[0:512, 512:1024])
        roi_3 = np.mean(i[512:1024, 0:512])
        roi_4 = np.mean(i[512:1024, 512:1024])
        newdata.extend((roi_1, roi_2, roi_3, roi_4))
        newtime.append(i.metadata['t_ms'])

    if not os.path.exists(Roi_Data_file):
        data, status, time = newFiles.newFiles(newdata, pic, Roi_Data_file,
                                               status_file, time_file, n)
    else:
        data = pd.read_csv(Roi_Data_file)
        data['%d' % scan] = newdata
        data.to_csv(Roi_Data_file, index=False)
        time = pd.read_csv(time_file)
        row, col = time.shape
        # offset this scan's timestamps so they continue from the previous scan
        time['%d' % scan] = np.array(newtime) + max(time['%d' % (scan - 1)])
        time.to_csv(time_file, index=False)
    return data, time, pic, scan
def lane_info(self):
    # build a dict mapping each position index to its lane, based on jumps
    # in the stage y coordinate between consecutive positions
    nd2_new = ND2_Reader(self.nd2_file)
    nd2_new.iter_axes = 'm'
    lane_dict = {}
    lane_dict[0] = 1
    pos_offset = {}
    cur_lane = 1
    pos_min = 0
    pos_offset[cur_lane] = pos_min - 1
    y_prev = nd2_new[0].metadata['y_um']
    pos_num = len(nd2_new)
    for i in range(1, pos_num):
        f = nd2_new[i]
        y_now = f.metadata['y_um']
        if abs(y_now - y_prev) > 200:  # y jump > 200 um means a new lane
            cur_lane += 1
            pos_min = i - 1
            pos_offset[cur_lane] = pos_min
        lane_dict[i] = cur_lane
        y_prev = y_now
    nd2_new.close()
    self.lane_dict = lane_dict
    self.pos_offset = pos_offset
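# Sketch (assumption): how the 200-um threshold above groups positions into
# lanes for a made-up list of stage y coordinates.
y_um = [0, 5, 12, 450, 455, 900, 903]   # three lanes of positions
lane, lanes = 1, {0: 1}
for i in range(1, len(y_um)):
    if abs(y_um[i] - y_um[i - 1]) > 200:
        lane += 1
    lanes[i] = lane
print(lanes)   # {0: 1, 1: 1, 2: 1, 3: 2, 4: 2, 5: 3, 6: 3}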
    for result_index, result in enumerate(local_result):
        shared[9 * result_index:9 * (result_index + 1)] = result[:]

    print("Done fitting")
    # q.put([frames_split[0], 25])


# %% Main
if __name__ == '__main__':
    # pr.enable()
    for name in filenames:
        with ND2_Reader(name) as ND2:
            basedir = os.getcwd()
            directory = name.split(".")[0].split("/")[-1]
            path = os.path.join(basedir, directory)
            try:
                os.mkdir(path)
            except OSError:
                pass  # directory already exists

            # parse ND2 info
            frames = ND2
            metadata = ND2.metadata
            # frames = frames[0:2]

            roi_finder = roi_finding.roi_finder(
from skimage.draw import polygon

image = np.zeros((512, 512))
inum = 11
z = read_roi_zip(path + "/" + zips[inum])

# find the nd2 file whose basename matches the selected ROI zip
nd = "-"
for a in nd2s:
    a1 = a.split('.')[0]
    print(a1, zips[inum])
    if a1 in zips[inum]:
        nd = path + "/" + a
        break
print(nd)
print(zips[inum])

with ND2_Reader(nd, channel=0) as frames:
    frames.iter_axes = 'z'
    for frame in frames:
        tb = frame.sum(axis=0)
        print(frame.max(), frame.shape, tb.shape)

# rasterize each ROI polygon into the mask image
for roi in z.keys():
    c = z[roi]['x']
    r = z[roi]['y']
    rr, cc = polygon(r, c)
    image[rr, cc] = 255

plt.figure(figsize=(8, 16))
plt.subplot(2, 1, 1)
plt.imshow(image)
plt.subplot(2, 1, 2)
directory_success = False
while not directory_success:
    try:
        mkdir(path)
        directory_success = True
    except OSError:
        directory_try += 1
        if directory_try == 1:
            path += "_%03d" % directory_try
        else:
            path = path[:-4]
            path += "_%03d" % directory_try

# open the same file with every available reader implementation
nd2_new = ND2Reader(name)
nd2_old = ND2Reader_SDK(name)
nd2_alt = ND2_Reader(name)
nd2_self = ND2ReaderSelf(name)
nd2_self_v2 = ND2ReaderSelfV2(name)

metadata_new = nd2_new.metadata
metadata_old = nd2_old.metadata
metadata_alt = nd2_alt.metadata
metadata_self = nd2_self.metadata
metadata_self_v2 = nd2_self_v2.get_metadata()

# metadata_old_filtered = {k: v for k, v in metadata_old.items() if v is not None}
# del metadata_old_filtered['time_start']
# del metadata_old_filtered['time_start_utc']
# metadata_new_filtered = {k: v for k, v in metadata_new.items() if v is not None}
from pims import ND2_Reader
import os
import numpy as np
from matplotlib import pyplot as plt
from numpy import array
import cv2 as cv
import glob

################################################################################

f = r'/media/devici/srinath_dhm02/srinath_confocal/pillars1/00200cs0015mum_inverted_r1'
os.chdir(f)

frames = ND2_Reader('00200cs0015mum_inverted_r1.nd2')
matr = array(frames)   # full stack in memory: (channels, z, x, y) per the indexing below

channels = matr.shape[0]
z_stacks = matr.shape[1]
x_px = matr.shape[2]
y_px = matr.shape[3]

pillar_height = 10 * (10**-6)
pillar_diameter = 30 * (10**-6)
min_pillar_to_pillar_distance = 30 * (10**-6)
n_ref = 1.403
z_res = 0.725 * (10**-6)
r_res = 1.2429611 * (10**-6)

phi = 1 - ((np.pi / 4) * (((pillar_diameter) /
""" Created on Thu Jul 16 13:54:01 2020 @author: s150127 """ import numpy as np import sys from pims import ND2_Reader # reader of ND2 files sys.path.append('../') name = "C:/Users/s150127/Downloads/_MBx dataset/1nMimager_newGNRs_100mW.nd2" ROI_locations = np.load('ROI_locations.npy') ROI_locations = ROI_locations - 1 #ROI_locations = ROI_locations[0:8, :] test_ROI_split = np.array_split(ROI_locations, 4) ND2 = ND2_Reader(name) metadata = ND2.metadata frames_list = list(range(metadata['sequence_count'])) frames_list_split = np.array_split(frames_list, 4) frames = ND2 for i, splitter in enumerate(frames_list_split): frames_split = frames[splitter]
def readND2_saveTIFF(images, output_path, dir_path_maxp_gfp, csv_file):
    new_filenames = []
    try:
        with ND2_Reader(images) as frames:
            if 'm' in frames.axes:
                frames.iter_axes = 'm'
            frames.bundle_axes = 'zcyx'
            meta = frames.metadata
            if 'objective' in meta and 'λ' in meta['objective']:
                meta['objective'] = meta['objective'].replace("λ", "lambda")

            # Get channel info from metadata
            channels_names, channels_emission_n = get_channels_info(meta)

            # Skip the file if it is missing channels (fewer than four)
            if channels_names[3] == "" or "5" in channels_names:
                logging.info(f'skipping file - missing channels - {images}')
                with open(failing_nd2_list_file, "a+") as f:
                    f.write(f'{os.path.basename(images)}\n')
                return csv_file

            dapi_num = get_channel_num(channels_names, 'dapi_andor')
            gfp_num = get_channel_num(channels_names, 'gfp_andor')
            if dapi_num == -1 or gfp_num == -1:
                logging.info(f'skipping file - no dapi/gfp channel - {images}')
                with open(failing_nd2_list_file, "a+") as f:
                    f.write(f'{os.path.basename(images)}\n')
                return csv_file

            condition = (os.path.basename(images).split("_")[1].upper()
                         if "rnai" not in images
                         else f'RNAi_{os.path.basename(images).split("_")[2][5:]}')

            for i, frame in enumerate(frames):
                condition_max = find_latest_in_condition(
                    csv_file["filename"].dropna().tolist(), condition)
                new_name = f'{condition}_{condition_max + 1}'
                # if channels and x ended up swapped, restore (z, c, y, x) order
                if frame.shape[1] > frame.shape[3]:
                    frame = np.swapaxes(frame, 1, 3)
                tif.imsave(os.path.join(output_path, f'{new_name}.tif'),
                           frame, imagej=True, metadata=meta)
                os.chmod(os.path.join(output_path, f'{new_name}.tif'), 0o664)
                original_filename = f'{os.path.basename(images)[:-4]} series{i + 1}'
                new_row = {
                    "original filename": [original_filename],
                    "filename": [new_name],
                }
                make_maxproj(frame, f'{new_name}.tif', gfp_num, dir_path_maxp_gfp)
                df = pd.DataFrame(new_row)
                csv_file = pd.concat([csv_file, df])
                csv_file = csv_file.reset_index(drop=True)
                new_filenames.append(new_name)
                z_size = frame.shape[0]
                logging.info(f'success nd2_to_tif {original_filename}')
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logging.warning(f'\nException ND2_to_tif {images}\n{e}')
        logging.warning(f'{exc_type} {exc_tb.tb_lineno}\n')
        with open(failing_nd2_list_file, "a+") as f:
            f.write(f'{os.path.basename(images)}\n')
        return csv_file

    os.remove(images)
    csv_file = fill_additional_df_cols(csv_file, new_filenames, channels_names,
                                       channels_emission_n, dapi_num, gfp_num,
                                       z_size, os.path.basename(images))
    csv_file.to_csv(csv_path, index=False)
    return csv_file
# The goal of this script is to convert A SELECTED CHANNEL from nd2 files
# into opencv-readable videos
from pims import ND2_Reader
import numpy as np
from matplotlib import pyplot as plt

frames = ND2_Reader('./160910SPE6_st_13.5_D/160910SPE6_st_13.5.nd2')
frames.bundle_axes = 'tyx'
frames.iter_axes = 'c'

#%%
import cv2

img1 = c8bit[0]   # query image
img2 = c8bit[50]  # train image

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1, des2)
# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)

out = np.zeros((512, 512))
# Draw the first 50 matches.
out = cv2.drawMatches(img1, kp1, img2, kp2, matches[:50], out, flags=2)
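# Sketch (assumption): c8bit is never defined in this snippet; one plausible
# construction from the reader above (it would belong before the matching
# cell), scaling the first channel's (t, y, x) stack to 8-bit for OpenCV.
stack = np.asarray(frames[0], dtype=np.float32)  # first channel, shape (t, y, x)
stack -= stack.min()
stack *= 255.0 / max(stack.max(), 1.0)
c8bit = [frame.astype(np.uint8) for frame in stack]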