def update_mask(self):
    """
    Rebuild the binary masks for the MaskRCNN detections and for the
    free-hand added ROIs (in case they were edited), then refresh the
    displayed image.

    !!!ISSUE: getLocalHandlePositions: moving handles changes the position read out, dragging roi as a whole doesn't.
    """
    resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1])

    # Binary mask from ML detection.
    if len(self.selected_ML_Index) > 0:
        # Keep only the dictionary entries that describe roi items.
        roi_dict = {}
        for key, value in self.selected_cells_infor_dict.items():
            print(key)
            if 'ROIitem' in key:
                roi_dict[key] = value

        self.MLmask = ProcessImage.ROIitem2Mask(
            roi_dict, mask_resolution=resolution)

    # Binary mask of the rois added by hand.
    self.addedROIitemMask = ProcessImage.ROIitem2Mask(
        self.roi_list_freehandl_added, mask_resolution=resolution)

    self.intergrate_into_final_mask()
def create_mask(self):
    """
    Create untransformed binary mask, sent out the signal to DMD widget
    for further transformation.

    Returns
    -------
    None.
    """
    fill_contour = self.fillContourButton.isChecked()
    invert_mode = self.invertMaskButton.isChecked()
    thickness = self.thicknessSpinBox.value()
    target_laser = self.transform_for_laser_menu.selectedItems()[0].text()

    # Get the list of rois from the current ROIitems in "Select" Drawwidget.
    list_of_rois = self.get_list_of_rois()

    # Key under which this mask's signal and preview are stored.
    mask_name = "mask_{}".format(self.mask_index_spinbox.value())

    #---- This is the roi list sent to DMD to generate final stack of masks.----
    self.sig_to_calling_widget[mask_name] = [
        list_of_rois, fill_contour, thickness, invert_mode, target_laser
    ]

    # Build and show the untransformed mask.
    self.current_mask = ProcessImage.CreateBinaryMaskFromRoiCoordinates(
        list_of_rois,
        fill_contour=fill_contour,
        contour_thickness=thickness,
        invert_mask=invert_mode)
    self.untransformed_mask_dict[mask_name] = self.current_mask
    self.mask_view.setImage(self.current_mask)
def create_voltage_signal(self, list_of_rois):
    """
    Rasterize the given ROIs into a binary mask, convert the mask into
    galvo contour-scanning DAQ signals and load them into the scanning
    thread.

    Parameters
    ----------
    list_of_rois : list
        List of roi vertex coordinate arrays (in image units).
    """
    # BUG FIX: the original wrote
    #     filled_mask = OriginalImage = np.zeros((1000, 1000))
    # which bound BOTH names to the same array, so the in-place "+="
    # below silently filled OriginalImage as well. Keep them separate.
    OriginalImage = np.zeros((1000, 1000))
    filled_mask = np.zeros((1000, 1000))

    for roi in list_of_rois:
        # Offset/scale the roi vertices into the 1000x1000 grid.
        filled_mask += polygon2mask((1000, 1000), (roi + 5) * 100)

    # Collapse overlapping polygons into one binary mask.
    filled_mask = (filled_mask > 0).astype(int).transpose()

    fig, axs = plt.subplots(1, 1)
    axs.imshow(filled_mask)

    scanning_voltage = 5
    points_per_contour = int(self.points_per_contour_textbox.text())
    sampling_rate = int(self.sampling_rate_textbox.text())

    contourScanningSignal = ProcessImage.mask_to_contourScanning_DAQsignals(
        filled_mask,
        OriginalImage,
        scanning_voltage,
        points_per_contour,
        sampling_rate,
        repeats=1)
    contourScanningSignal = np.vstack(
        (contourScanningSignal[0][0], contourScanningSignal[1][0]))

    self.galvoThread = pmtimagingTest_contour()
    self.galvoThread.setWave_contourscan(sampling_rate,
                                         contourScanningSignal,
                                         points_per_contour)
def gaussian_fit(self):
    """
    Sample the focus degree around the current position, fit a Gaussian
    to the measurements and return the position of best focus.

    Returns
    -------
    max_focus_pos : float or list
        Interpolated objective position with the highest focus degree,
        or ``[False, self.current_pos]`` if the fit failed.
    """
    # The upper edge of the sampling window.
    upper_position = self.current_pos + self.init_step_size
    # The lower edge of the sampling window.
    lower_position = self.current_pos - self.init_step_size

    # Generate the sampling positions.
    sample_positions = np.linspace(lower_position, upper_position,
                                   self.total_step_number)

    degree_of_focus_list = []
    for each_pos in sample_positions:
        # Go through each position and write down the focus degree.
        degree_of_focus = self.evaluate_focus(round(each_pos, 6))
        degree_of_focus_list.append(degree_of_focus)
    print(degree_of_focus_list)

    try:
        interpolated_fitted_curve = ProcessImage.gaussian_fit(
            degree_of_focus_list)

        # Generate the interpolated new focus position axis.
        x_axis_new = np.linspace(lower_position, upper_position,
                                 len(interpolated_fitted_curve))

        # Position with the highest focus degree. np.argmax is robust
        # even when two samples share the same focus value, unlike the
        # previous dict(zip(curve, x_axis)) lookup which could collide.
        max_focus_pos = x_axis_new[np.argmax(interpolated_fitted_curve)]

        if False:  # Toggle to plot the fitting for debugging.
            plt.plot(sample_positions,
                     np.asarray(degree_of_focus_list),
                     'b+:',
                     label='data')
            plt.plot(x_axis_new,
                     interpolated_fitted_curve,
                     'ro:',
                     label='fit')
            plt.legend()
            plt.title('Fig. Fit for focus degree')
            plt.xlabel('Position')
            plt.ylabel('Focus degree')
            plt.show()

        max_focus_pos = round(max_focus_pos, 6)
        print(max_focus_pos)
        # max_focus_pos_focus_degree = self.evaluate_focus(round(max_focus_pos, 6))

    except Exception:
        # Narrowed from a bare "except:" so system exits and
        # KeyboardInterrupt are no longer swallowed here.
        print("Fitting failed.")
        max_focus_pos = [False, self.current_pos]

    return max_focus_pos
def create_mask(self):
    """
    Build a binary mask from the current ROIs and show it in the mask view.
    """
    fill = self.fillContourButton.isChecked()
    invert = self.invertMaskButton.isChecked()
    thickness = self.thicknessSpinBox.value()

    rois = self.get_list_of_rois()

    self.mask = ProcessImage.CreateBinaryMaskFromRoiCoordinates(
        rois,
        fill_contour=fill,
        contour_thickness=thickness,
        invert_mask=invert)

    self.mask_view.setImage(self.mask)
def finalmask_to_DMD_mask(self,
                          laser,
                          dict_transformations,
                          flag_fill_contour=True,
                          contour_thickness=1,
                          flag_invert_mode=False,
                          mask_resolution=(1024, 768)):
    """
    Same goal as transform_to_DMD_mask, with input being the final binary
    mask and using find_contour to get all vertices and perform
    transformation, and then coordinates to mask.

    Parameters
    ----------
    laser : str
        Laser channel whose registration transformation is applied.
    dict_transformations : dict
        Mapping from laser name to coordinate transformation.
    flag_fill_contour : bool, optional
        Whether to fill the transformed contours. The default is True.
    contour_thickness : int, optional
        Contour line thickness. The default is 1.
    flag_invert_mode : bool, optional
        Whether to invert the resulting mask. The default is False.
    mask_resolution : tuple, optional
        Target DMD mask resolution. The default is (1024, 768).

    Returns
    -------
    self.final_DMD_mask
        The transformed DMD mask.
    """
    # BUG FIX: the keyword arguments were previously hard-coded
    # (True, 1, False, (1024, 768)) in the call, silently ignoring the
    # values the caller passed in. Forward them instead.
    self.final_DMD_mask = ProcessImage.binarymask_to_DMD_mask(
        self.final_mask,
        laser,
        dict_transformations,
        flag_fill_contour=flag_fill_contour,
        contour_thickness=contour_thickness,
        flag_invert_mode=flag_invert_mode,
        mask_resolution=mask_resolution)

    return self.final_DMD_mask
def receive_mask_coordinates(self, sig):
    """
    Receive untransformed mask coordinates, transform them, create the
    mask and send it to the DMD.
    """
    list_of_rois, flag_fill_contour, contour_thickness, flag_invert_mode = sig

    # Register the roi coordinates onto the DMD coordinate frame.
    transformed_rois = self.transform_coordinates(list_of_rois)

    self.mask = ProcessImage.CreateBinaryMaskFromRoiCoordinates(
        transformed_rois,
        fill_contour=flag_fill_contour,
        contour_thickness=contour_thickness,
        invert_mask=flag_invert_mode,
        mask_resolution=(768, 1024))

    fig, axs = plt.subplots(1, 1)
    axs.imshow(self.mask)

    self.DMD.send_data_to_DMD(self.mask)
def analyze_single_image(self,
                         Rawimage,
                         axis=None,
                         show_result=True,
                         show_each_cell=False):
    """
    Run ML detection on one image and extract per-cell data from the
    detection result.
    """
    ml_results = self.DetectionOnImage(Rawimage,
                                       axis=axis,
                                       show_result=show_result)

    (cell_Data,
     cell_counted_inRound,
     total_cells_counted_in_coord) = ProcessImage.retrieveDataFromML(
         Rawimage, ml_results, show_each_cell=show_each_cell)

    print(
        "Number of cells counted so far: {}".format(cell_counted_inRound))
    print("Number of cells counted in image: {}".format(
        total_cells_counted_in_coord))

    return cell_Data
def evaluate_focus(self, obj_position=None):
    """
    Evaluate the focus degree of certain objective position.

    Parameters
    ----------
    obj_position : float, optional
        The target objective position. The default is None.

    Returns
    -------
    degree_of_focus : float
        Degree of focus.
    """
    # "is not None" instead of "!= None": identity comparison is the
    # correct idiom and keeps 0.0 as a valid target position.
    if obj_position is not None:
        self.pi_device_instance.move(obj_position)

    # Get the image.
    if self.source_of_image == "PMT":
        self.galvo_image = self.galvo.run()
        plt.figure()
        plt.imshow(self.galvo_image)
        plt.show()

        if False:  # Toggle to archive the raw image for offline inspection.
            with skimtiff.TiffWriter(
                    os.path.join(
                        r'M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2020-11-17 gaussian fit auto-focus cells\trial_11',
                        str(obj_position).replace(".", "_") +
                        '.tif')) as tif:
                tif.save(self.galvo_image.astype('float32'), compress=0)

        degree_of_focus = ProcessImage.local_entropy(
            self.galvo_image.astype('float32'))
    # NOTE(review): if self.source_of_image is not "PMT" the return below
    # raises UnboundLocalError — confirm whether a camera branch (see the
    # other evaluate_focus variant in this project) should be added here.

    time.sleep(0.2)

    return degree_of_focus
def receive_mask_coordinates(self, sig_from_CoordinateWidget):
    """
    Receive untransformed mask coordinates, transform them, create mask,
    send mask to DMD.

    PARAMETERS
    ----------
    sig_from_CoordinateWidget : list.
        [[signal for first frame], [signal for second frame], ...]
        Signal sent out from CoordinateWidget which contains list of ROIs
        and other parameters for transformation and mask generation.
    """
    for each_mask_key in sig_from_CoordinateWidget:
        print(f"len {len(sig_from_CoordinateWidget)}")

        # Unpack the per-frame signal once instead of indexing repeatedly.
        frame_signal = sig_from_CoordinateWidget[each_mask_key]
        list_of_rois = frame_signal[0]
        flag_fill_contour = frame_signal[1]
        contour_thickness = frame_signal[2]
        flag_invert_mode = frame_signal[3]
        for_which_laser = frame_signal[4]

        list_of_rois_transformed = self.transform_coordinates(
            list_of_rois, for_which_laser)

        mask_single_frame = ProcessImage.CreateBinaryMaskFromRoiCoordinates(
            list_of_rois_transformed,
            fill_contour=flag_fill_contour,
            contour_thickness=contour_thickness,
            invert_mask=flag_invert_mode,
            mask_resolution=(768, 1024),
        )

        fig, axs = plt.subplots(1, 1)
        axs.imshow(mask_single_frame)

        print("each_mask_key {}".format(each_mask_key))

        # self.mask is kept as a 3-d array whose 3rd axis indexes frames.
        if each_mask_key == "mask_1":
            self.mask = mask_single_frame[:, :, np.newaxis]
        else:
            self.mask = np.concatenate(
                (self.mask, mask_single_frame[:, :, np.newaxis]), axis=2)

    self.DMD_actuator.send_data_to_DMD(self.mask)
def intergrate_into_final_mask(self):
    """
    Combine the ML mask with the free-hand added roi mask, display the
    composite RGB overlay, and keep the summed result as self.final_mask.
    """
    resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1])

    # Binary mask of added rois.
    self.addedROIitemMask = ProcessImage.ROIitem2Mask(
        self.roi_list_freehandl_added, mask_resolution=resolution)

    # Display the RGB mask, ML mask plus free-hand added.
    overlay = (gray2rgb(self.addedROIitemMask) * self.mask_color_multiplier
               + gray2rgb(self.MLmask) * self.mask_color_multiplier
               + gray2rgb(self.MLtargetedImg))
    self.Mask_edit_viewItem.setImage(overlay)

    self.final_mask = self.MLmask + self.addedROIitemMask

    # In case the input image is 2048*2048 and was resized to fit MaskRCNN,
    # convert back to original size for the DMD tranformation.
    if self.final_mask.shape[:2] != self.Rawimage.shape[:2]:
        self.final_mask = resize(
            self.final_mask,
            [self.Rawimage.shape[0], self.Rawimage.shape[1]],
            preserve_range=True).astype(self.final_mask.dtype)

    # self.final_mask = np.where(self.final_mask <= 1, self.final_mask, int(1))

    plt.figure()
    plt.imshow(self.final_mask)
    plt.show()
def FluorescenceAnalysis(self, folder, round_num, save_mask=True):
    """
    # =============================================================================
    # Given the folder and round number, return a dictionary for the round
    # that contains each scanning position as key and structured array of detailed
    # information about each identified cell as content.
    #
    #   Returned structured array fields:
    #   - BoundingBox of cell ROI
    #   - Mean intensity of whole cell area
    #   - Mean intensity of cell membrane part
    #   - Contour soma ratio
    # =============================================================================

    Parameters
    ----------
    folder : string.
        The directory to folder where the screening data is stored.
    round_num : string.
        The target round number of analysis.
    save_mask: bool.
        Whether to save segmentation masks.

    Returns
    -------
    cell_Data : pd.DataFrame.
        Sum of return from func: retrieveDataFromML, for whole round.
    """
    # Discover the scanning layout (rounds, coordinates, file names)
    # from the Zmax-projection files in the folder.
    RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(
        folder, file_keyword="Zmax")
    # RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zfocus')

    if not os.path.exists(
            os.path.join(folder, "MLimages_{}".format(round_num))):
        # If the folder is not there, create the folder to store ML segmentations
        os.mkdir(os.path.join(folder, "MLimages_{}".format(round_num)))

    for EachRound in RoundNumberList:

        cells_counted_in_round = 0
        # No background correction unless a background folder for this
        # round is found below.
        background_substraction = False

        # =============================================================================
        #             For background_substraction
        # =============================================================================
        # If background images are taken
        background_images_folder = os.path.join(
            folder, "background {}".format(EachRound))
        # print(background_images_folder)

        if os.path.exists(background_images_folder):
            # If the background image is taken to substract out
            background_substraction = True
            print("Run background substraction.")

            # Get all the background files names
            background_fileNameList = []

            for file in os.listdir(background_images_folder):
                # Skip the previously saved averaged result; accept raw tifs.
                if "calculated background" not in file:
                    if "tif" in file or "TIF" in file:
                        background_fileNameList.append(
                            os.path.join(background_images_folder, file))

            # Pixel-wise mean over the stack of background frames.
            background_image = ProcessImage.image_stack_calculation(
                background_fileNameList, operation="mean")

            # # Smooth the background image
            # background_image = ProcessImage.average_filtering(
            #     background_image, filter_side_length = 25)

            # Save the individual file.
            with skimtiff.TiffWriter(
                    os.path.join(background_images_folder,
                                 "calculated background.tif"),
                    imagej=True,
            ) as tif:
                tif.save(background_image.astype(np.uint16), compress=0)

        if EachRound == round_num:

            # Start numbering cells at each round
            self.cell_counted_inRound = 0

            for EachCoord in CoordinatesList:

                # =============================================================================
                #             For fluorescence:
                # =============================================================================
                print(EachCoord)
                # -------------- readin image---------------
                for Eachfilename in enumerate(fileNameList):
                    if (EachCoord in Eachfilename[1]
                            and EachRound in Eachfilename[1]):
                        if "Zmax" in Eachfilename[1]:
                            # NOTE(review): the bare except below falls back
                            # to the camera naming scheme when "_PMT" is not
                            # in the file name — confirm only these two
                            # schemes occur.
                            try:
                                ImgNameInfor = Eachfilename[1][
                                    0:Eachfilename[1].index(
                                        "_PMT"
                                    )]  # get rid of '_PMT_0Zmax.tif' in the name.
                            except:
                                ImgNameInfor = Eachfilename[1][
                                    0:Eachfilename[1].index(
                                        "_Cam"
                                    )]  # get rid of '_Cam_Zmax.tif' in the name.
                        elif "Zfocus" in Eachfilename[1]:
                            ImgNameInfor = Eachfilename[1][
                                0:len(Eachfilename[1]) -
                                16]  # get rid of '_PMT_0Zfocus.tif' in the name.
                        elif "Zpos1" in Eachfilename[1]:
                            ImgNameInfor = Eachfilename[1][0:len(
                                Eachfilename[1]
                            )]  # get rid of '_PMT_0Zfocus.tif' in the name.

                        _imagefilename = os.path.join(
                            folder, Eachfilename[1])
                        # ------------------------------------------

                        # =========================================================================
                        #             USING MASKRCNN...
                        # =========================================================================
                        # Imagepath = self.Detector._fixPathName(_imagefilename)
                        Rawimage = imread(_imagefilename)

                        # Background substraction
                        if background_substraction == True:
                            Rawimage = np.abs(Rawimage - background_image)

                            camera_dark_level = 100

                            # # Normalize to the illumination intensity
                            # Rawimage = np.uint16(Rawimage \
                            # / ((background_image - camera_dark_level)\
                            # /(np.amin(background_image) - camera_dark_level)))

                        # if ClearImgBef == True:
                        #     # Clear out junk parts to make it esaier for ML detection.
                        #     RawimageCleared = self.preProcessMLimg(Rawimage, smallest_size=300, lowest_region_intensity=0.16)
                        # else:
                        #     RawimageCleared = Rawimage.copy()

                        image = ProcessImage.convert_for_MaskRCNN(Rawimage)

                        # Run the detection on input image.
                        results = self.Detector.detect([image])
                        MLresults = results[0]

                        if save_mask == True:
                            fig, ax = plt.subplots()
                            # Set class_names = [None,None,None,None] to mute class name display.
                            visualize.display_instances(
                                image,
                                MLresults["rois"],
                                MLresults["masks"],
                                MLresults["class_ids"],
                                class_names=[None, None, None, None],
                                ax=ax,
                                centre_coors=MLresults["Centre_coor"],
                                Centre_coor_radius=2,
                                WhiteSpace=(0, 0),
                            )  # MLresults['class_ids'],MLresults['scores'],
                            # ax.imshow(fig)
                            fig.tight_layout()
                            # Save the detection image
                            fig_name = os.path.join(
                                folder, "MLimages_{}\{}.tif".format(
                                    round_num, ImgNameInfor))
                            plt.savefig(fname=fig_name,
                                        dpi=200,
                                        pad_inches=0.0,
                                        bbox_inches="tight")

                        # segmentationImg = Image.fromarray(fig) #generate an image object
                        # segmentationImg.save(os.path.join(folder, 'MLimages_{}\{}.tif'.format(round_num, ImgNameInfor)))#save as tif

                        # Use retrieveDataFromML from ImageProcessing.py to extract numbers.
                        if self.cell_counted_inRound == 0:
                            # First image of the round seeds the data frame.
                            (
                                cell_Data,
                                self.cell_counted_inRound,
                                total_cells_counted_in_coord,
                            ) = ProcessImage.retrieveDataFromML(
                                Rawimage,
                                MLresults,
                                str(ImgNameInfor),
                                self.cell_counted_inRound,
                                show_each_cell=False)
                        else:
                            (
                                Cell_Data_new,
                                self.cell_counted_inRound,
                                total_cells_counted_in_coord,
                            ) = ProcessImage.retrieveDataFromML(
                                Rawimage,
                                MLresults,
                                str(ImgNameInfor),
                                self.cell_counted_inRound,
                                show_each_cell=False)

                            if len(Cell_Data_new) > 0:
                                cell_Data = cell_Data.append(Cell_Data_new)

                # Count in total how many flat and round cells are identified.
                cells_counted_in_round += total_cells_counted_in_coord

            print("Number of round/flat cells in this round: {}".format(
                cells_counted_in_round))

    # Save to excel
    cell_Data.to_excel(
        os.path.join(
            os.path.join(
                folder, round_num + "_" +
                datetime.now().strftime("%Y-%m-%d_%H-%M-%S") +
                "_CellsProperties.xlsx",
            )))

    return cell_Data
def bisection(self):
    """
    Bisection way of finding focus.

    Returns
    -------
    mid_position : float
        The converged objective position, or False when no cell is found
        in the first image.
    """
    # The upper edge in which we run bisection.
    upper_position = self.current_pos + self.init_step_size
    # The lower edge in which we run bisection.
    lower_position = self.current_pos - self.init_step_size

    for step_index in range(1, self.total_step_number + 1):
        # In each step of bisection finding.

        # In the first round, get degree of focus at three positions.
        if step_index == 1:
            # Get degree of focus in the mid.
            mid_position = (upper_position + lower_position) / 2
            degree_of_focus_mid = self.evaluate_focus(mid_position)
            print("mid focus degree: {}".format(
                round(degree_of_focus_mid, 5)))

            # Break the loop if focus degree is below threshold which means
            # that there's no cell in image.
            if not ProcessImage.if_theres_cell(
                    self.galvo_image.astype('float32')):
                print('no cell')
                mid_position = False
                break

            # Move to top and evaluate.
            degree_of_focus_up = self.evaluate_focus(
                obj_position=upper_position)
            print("top focus degree: {}".format(
                round(degree_of_focus_up, 5)))
            # Move to bottom and evaluate.
            degree_of_focus_low = self.evaluate_focus(
                obj_position=lower_position)
            print("bot focus degree: {}".format(
                round(degree_of_focus_low, 5)))
            # Sorting dicitonary of degrees in ascending.
            biesection_range_dic = {
                "top": [upper_position, degree_of_focus_up],
                "bot": [lower_position, degree_of_focus_low]
            }

        # In the next rounds, only need to go to center and update boundaries.
        elif step_index > 1:
            # The upper edge in which we run bisection.
            upper_position = biesection_range_dic["top"][0]
            # The lower edge in which we run bisection.
            lower_position = biesection_range_dic["bot"][0]

            # Get degree of focus in the mid.
            mid_position = (upper_position + lower_position) / 2
            degree_of_focus_mid = self.evaluate_focus(mid_position)
            print("Current focus degree: {}".format(
                round(degree_of_focus_mid, 5)))

            # If sits in upper half, make the middle values new bottom.
            # NOTE(review): the endpoint focus degrees stored in
            # biesection_range_dic are never re-measured after round 1 —
            # confirm this convergence criterion is intended.
            if biesection_range_dic["top"][1] > biesection_range_dic["bot"][1]:
                biesection_range_dic["bot"] = [
                    mid_position, degree_of_focus_mid
                ]
            else:
                biesection_range_dic["top"] = [
                    mid_position, degree_of_focus_mid
                ]

            print("The upper pos: {}; The lower: {}".format(
                biesection_range_dic["top"][0],
                biesection_range_dic["bot"][0]))

    return mid_position
def PMT_image_processing(self):
    """
    Reconstruct the image from np array and save it.

    Returns
    -------
    None.
    """
    for imageSequence in range(self.repeatnum):

        # NOTE(review): the bare except at the bottom swallows ALL errors
        # for a frame (including programming errors) and only prints —
        # confirm this best-effort behavior is intended.
        try:
            # Select the samples belonging to this repeat from the raw
            # acquisition buffer.
            self.PMT_image_reconstructed_array = self.data_collected_0[
                np.where(self.PMT_data_index_array == imageSequence + 1)]

            # Average over the repeated line samples.
            Dataholder_average = np.mean(
                self.PMT_image_reconstructed_array.reshape(
                    self.averagenum, -1),
                axis=0)

            # Reshape the 1-d samples into a 2-d image.
            Value_yPixels = int(self.lenSample_1 / self.ScanArrayXnum)
            self.PMT_image_reconstructed = np.reshape(
                Dataholder_average, (Value_yPixels, self.ScanArrayXnum))

            self.PMT_image_reconstructed = self.PMT_image_reconstructed[:, 50:550]  # Crop size based on: M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2019-12-30 2p beads area test 4um
            # self.PMT_image_reconstructed = self.PMT_image_reconstructed[:, 70:326] # for 256*256 images

            # Evaluate the focus degree of re-constructed image.
            self.FocusDegree_img_reconstructed = ProcessImage.local_entropy(
                self.PMT_image_reconstructed.astype('float32'))
            print('FocusDegree_img_reconstructed is {}'.format(
                self.FocusDegree_img_reconstructed))

            # Save the individual file.
            with skimtiff.TiffWriter(os.path.join(self.scansavedirectory, 'Round'+str(self.RoundWaveformIndex[0]) + '_Grid' + str(self.Grid_index) +'_Coords'+str(self.currentCoordsSeq)+'_R'+str(self.CurrentPosIndex[0])+'C'+str(self.CurrentPosIndex[1])+'_PMT_'+str(imageSequence)+'Zpos'+str(self.ZStackOrder)+'.tif'), imagej = True) as tif:
                tif.save(self.PMT_image_reconstructed.astype('float32'), compress=0, metadata = {"FocusPos: " : str(self.FocusPos)})

            plt.figure()
            plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray)  # For reconstructed image we pull out the first layer, getting 2d img.
            plt.show()

            #---------------------------------------------For multiple images in one z pos, Stack the arrays into a 3d array--------------------------------------------------------------------------
            # if imageSequence == 0:
            #     self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed[np.newaxis, :, :] # Turns into 3d array
            # else:
            #     self.PMT_image_reconstructed_stack = np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed[np.newaxis, :, :]), axis=0)
            #     print(self.PMT_image_reconstructed_stack.shape)

            #---------------------------------------------Calculate the z max projection-----------------------------------------------------------------------
            if self.repeatnum == 1:  # Consider one repeat image situlation
                if self.ZStackNum > 1:
                    # Accumulate one slice per z position into a stack.
                    if self.ZStackOrder == 1:
                        self.PMT_image_maxprojection_stack = self.PMT_image_reconstructed[np.newaxis, :, :]
                    else:
                        self.PMT_image_maxprojection_stack = np.concatenate((self.PMT_image_maxprojection_stack, self.PMT_image_reconstructed[np.newaxis, :, :]), axis=0)
                else:
                    self.PMT_image_maxprojection_stack = self.PMT_image_reconstructed[np.newaxis, :, :]

            # Save the max projection image
            if self.ZStackOrder == self.ZStackNum:
                # Pixel-wise maximum across all z slices.
                self.PMT_image_maxprojection = np.max(
                    self.PMT_image_maxprojection_stack, axis=0)

                # Save the zmax file.
                with skimtiff.TiffWriter(os.path.join(self.scansavedirectory, 'Round'+str(self.RoundWaveformIndex[0])+ '_Grid' + str(self.Grid_index) + '_Coords'+str(self.currentCoordsSeq)+'_R'+str(self.CurrentPosIndex[0])+'C'+str(self.CurrentPosIndex[1])+'_PMT_'+str(imageSequence)+'Zmax'+'.tif'), imagej = True) as tif:
                    tif.save(self.PMT_image_maxprojection.astype('float32'), compress=0, metadata = {"FocusPos: " : str(self.FocusPos)})

        except:
            print('No.{} image failed to generate.'.format(imageSequence))
def analyze_images_in_folder(self,
                             folder,
                             generate_zmax=False,
                             show_result=True,
                             save_mask=True,
                             save_excel=True):
    """
    Given the folder, perform general analysis over the images in it.

    Parameters
    ----------
    folder : str
        Path to the folder.
    generate_zmax : bool, optional
        Whether to calcaulate the z-max projection first. The default is False.
    show_result : bool, optional
        If show the machine learning segmentation results. The default is True.
    save_mask : bool, optional
        Whether to save the segmentation overlay images. The default is True.
    save_excel : bool, optional
        Whether to save the per-cell data to excel. The default is True.

    Returns
    -------
    cell_Data : pd.dataframe
        Collected per-cell properties for every analyzed image.
    """
    flat_cell_counted_in_folder = 0
    total_cells_counted_in_folder = 0
    # BUG FIX: background_substraction was only ever assigned inside the
    # "background folder exists" branch, so the check further down raised
    # NameError whenever no background images were present. Initialize it.
    background_substraction = False

    # If need to do zmax projection first
    if generate_zmax == True:
        ProcessImage.cam_screening_post_processing(folder)
        # Here a new folder for maxProjection is generated inside, change the path
        folder = os.path.join(folder, 'maxProjection')

    # If background images are taken
    if os.path.exists(os.path.join(folder, 'background')):
        # If the background image is taken to substract out
        background_substraction = True

        # Get all the background files names
        background_fileNameList = []
        for file in os.listdir(os.path.join(folder, 'background')):
            if "tif" in file:
                background_fileNameList.append(
                    os.path.join(folder, 'background', file))

        # Pixel-wise mean over the background stack.
        background_image = ProcessImage.image_stack_calculation(
            background_fileNameList, operation="mean")

    # Get a list of file names
    fileNameList = []
    for file in os.listdir(folder):
        if "tif" in file and "LED" not in file:
            fileNameList.append(file)
    print(fileNameList)

    # Analyse each image
    for image_file_name in fileNameList:
        print(image_file_name)
        Rawimage = imread(os.path.join(folder, image_file_name))

        if background_substraction == True:
            Rawimage = np.abs(Rawimage - background_image)

        # Analyze each image
        # Run the detection on input image.
        MLresults = self.DetectionOnImage(Rawimage,
                                          axis=None,
                                          show_result=show_result)

        if save_mask == True:

            if not os.path.exists(os.path.join(folder, 'ML_masks')):
                # If the folder is not there, create the folder
                os.mkdir(os.path.join(folder, 'ML_masks'))

            fig, ax = plt.subplots()
            # Set class_names = [None,None,None,None] to mute class name display.
            visualize.display_instances(
                Rawimage,
                MLresults['rois'],
                MLresults['masks'],
                MLresults['class_ids'],
                class_names=[None, None, None, None],
                ax=ax,
                centre_coors=MLresults['Centre_coor'],
                Centre_coor_radius=2,
                WhiteSpace=(0, 0))  #MLresults['class_ids'],MLresults['scores'],
            # ax.imshow(fig)
            fig.tight_layout()
            # Save the detection Rawimage
            fig_name = os.path.join(
                folder, 'ML_masks',
                'ML_mask_{}.png'.format(
                    image_file_name[0:len(image_file_name) - 4]))
            plt.savefig(fname=fig_name,
                        dpi=200,
                        pad_inches=0.0,
                        bbox_inches='tight')

        if flat_cell_counted_in_folder == 0:
            # First image seeds the data frame.
            cell_Data, flat_cell_counted_in_folder, total_cells_counted_in_coord = \
                ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)
        else:
            Cell_Data_new, flat_cell_counted_in_folder, total_cells_counted_in_coord = \
                ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)

            if len(Cell_Data_new) > 0:
                cell_Data = cell_Data.append(Cell_Data_new)

        total_cells_counted_in_folder += total_cells_counted_in_coord

    if save_excel == True:
        # Save to excel
        # NOTE(review): cell_Data is unbound when fileNameList is empty —
        # confirm callers guarantee at least one image in the folder.
        cell_Data.to_excel(
            os.path.join(
                folder,
                'CellsProperties_{}flat_outof_{}cells.xlsx'.format(
                    flat_cell_counted_in_folder,
                    total_cells_counted_in_folder)))

    return cell_Data
def FluorescenceAnalysis(self, folder, round_num, save_mask=True):
    """
    # =============================================================================
    # Given the folder and round number, return a dictionary for the round
    # that contains each scanning position as key and structured array of detailed
    # information about each identified cell as content.
    #
    #   Returned structured array fields:
    #   - BoundingBox of cell ROI
    #   - Mean intensity of whole cell area
    #   - Mean intensity of cell membrane part
    #   - Contour soma ratio
    # =============================================================================

    Parameters
    ----------
    folder : string.
        The directory to folder where the screening data is stored.
    round_num : string.
        The target round number of analysis.
    save_mask: bool.
        Whether to save segmentation masks.

    Returns
    -------
    cell_Data : pd.DataFrame.
        Sum of return from func: retrieveDataFromML, for whole round.
    """
    # Discover the scanning layout (rounds, coordinates, file names)
    # from the Zmax files in the folder.
    RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(
        folder, file_keyword='Zmax')
    # RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zfocus')

    if not os.path.exists(
            os.path.join(folder, 'MLimages_{}'.format(round_num))):
        # If the folder is not there, create the folder
        os.mkdir(os.path.join(folder, 'MLimages_{}'.format(round_num)))

    for EachRound in RoundNumberList:

        cells_counted_in_round = 0

        if EachRound == round_num:

            # Start numbering cells at each round
            self.cell_counted_inRound = 0

            for EachCoord in CoordinatesList:

                # =============================================================================
                #             For tag fluorescence:
                # =============================================================================
                print(EachCoord)
                #-------------- readin image---------------
                for Eachfilename in enumerate(fileNameList):
                    if EachCoord in Eachfilename[
                            1] and EachRound in Eachfilename[1]:
                        if '0Zmax' in Eachfilename[1]:
                            ImgNameInfor = Eachfilename[1][
                                0:len(Eachfilename[1]) -
                                14]  # get rid of '_PMT_0Zmax.tif' in the name.
                        elif '0Zfocus' in Eachfilename[1]:
                            ImgNameInfor = Eachfilename[1][
                                0:len(Eachfilename[1]) -
                                16]  # get rid of '_PMT_0Zfocus.tif' in the name.

                        _imagefilename = os.path.join(
                            folder, Eachfilename[1])
                        #------------------------------------------

                        # =========================================================================
                        #             USING MASKRCNN...
                        # =========================================================================
                        # Imagepath = self.Detector._fixPathName(_imagefilename)
                        Rawimage = imread(_imagefilename)

                        # if ClearImgBef == True:
                        #     # Clear out junk parts to make it esaier for ML detection.
                        #     RawimageCleared = self.preProcessMLimg(Rawimage, smallest_size=300, lowest_region_intensity=0.16)
                        # else:
                        #     RawimageCleared = Rawimage.copy()

                        image = ProcessImage.convert_for_MaskRCNN(Rawimage)

                        # Run the detection on input image.
                        results = self.Detector.detect([image])
                        MLresults = results[0]

                        if save_mask == True:
                            fig, ax = plt.subplots()
                            # Set class_names = [None,None,None,None] to mute class name display.
                            visualize.display_instances(
                                image,
                                MLresults['rois'],
                                MLresults['masks'],
                                MLresults['class_ids'],
                                class_names=[None, None, None, None],
                                ax=ax,
                                centre_coors=MLresults['Centre_coor'],
                                Centre_coor_radius=2,
                                WhiteSpace=(0, 0)
                            )  #MLresults['class_ids'],MLresults['scores'],
                            # ax.imshow(fig)
                            fig.tight_layout()
                            # Save the detection image
                            fig_name = os.path.join(
                                folder, 'MLimages_{}\{}.tif'.format(
                                    round_num, ImgNameInfor))
                            plt.savefig(fname=fig_name,
                                        dpi=200,
                                        pad_inches=0.0,
                                        bbox_inches='tight')

                        # segmentationImg = Image.fromarray(fig) #generate an image object
                        # segmentationImg.save(os.path.join(folder, 'MLimages_{}\{}.tif'.format(round_num, ImgNameInfor)))#save as tif

                        if self.cell_counted_inRound == 0:
                            # First image of the round seeds the data frame.
                            cell_Data, self.cell_counted_inRound, total_cells_counted_in_coord = \
                                ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)
                        else:
                            Cell_Data_new, self.cell_counted_inRound, total_cells_counted_in_coord = \
                                ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)

                            if len(Cell_Data_new) > 0:
                                cell_Data = cell_Data.append(Cell_Data_new)

                # Count in total how many flat and round cells are identified.
                cells_counted_in_round += total_cells_counted_in_coord

            print("Number of round/flat cells in this round: {}".format(
                cells_counted_in_round))

    # Save to excel
    cell_Data.to_excel(
        os.path.join(
            os.path.join(
                folder, round_num + '_' +
                datetime.now().strftime('%Y-%m-%d_%H-%M-%S') +
                '_CellsProperties.xlsx')))

    return cell_Data
def analyze_images_in_folder(
    self,
    folder,
    generate_zmax=False,
    show_result=True,
    save_mask=True,
    save_excel=True,
):
    """
    Given the folder, perform general analysis over the images in it.

    Parameters
    ----------
    folder : str
        Path to the folder.
    generate_zmax : bool, optional
        Whether to calcaulate the z-max projection first. The default is False.
    show_result : bool, optional
        If show the machine learning segmentation results. The default is True.
    save_mask : bool, optional
        Whether to save the segmentation overlay images. The default is True.
    save_excel : bool, optional
        Whether to export the per-cell data to excel. The default is True.

    Returns
    -------
    cell_Data : pd.dataframe
        Collected per-cell properties for every analyzed image.
    """
    flat_cell_counted_in_folder = 0
    total_cells_counted_in_folder = 0
    # No background correction unless a background folder is found below.
    background_substraction = False

    root_folder = folder

    # If need to do zmax projection first
    if generate_zmax == True:
        ProcessImage.cam_screening_post_processing(root_folder)
        # Here a new folder for maxProjection is generated inside, change the path
        folder = os.path.join(root_folder, "maxProjection")

    # If background images are taken
    if os.path.exists(os.path.join(root_folder, "background")):
        # If the background image is taken to substract out
        background_substraction = True
        print("Run background substraction.")

        # Get all the background files names
        background_fileNameList = []

        for file in os.listdir(os.path.join(root_folder, "background")):
            # Skip the previously saved averaged result; accept raw tifs.
            if "calculated background" not in file:
                if "tif" in file or "TIF" in file:
                    background_fileNameList.append(
                        os.path.join(root_folder, "background", file))

        # Average over multiple images
        background_image = ProcessImage.image_stack_calculation(
            background_fileNameList, operation="mean")

        # # Smooth the image
        # background_image = ProcessImage.average_filtering(
        #     background_image, filter_side_length = 25)

        # Save the individual file.
        with skimtiff.TiffWriter(
                os.path.join(root_folder, "background",
                             "calculated background.tif"),
                imagej=True,
        ) as tif:
            tif.save(background_image.astype(np.uint16), compress=0)

    # Get a list of file names
    fileNameList = []
    for file in os.listdir(folder):
        if "tif" in file and "LED" not in file:
            fileNameList.append(file)
    print(fileNameList)

    # Analyse each image
    for image_file_name in fileNameList:
        print(image_file_name)
        Rawimage = imread(os.path.join(folder, image_file_name))

        if background_substraction == True:
            Rawimage = np.abs(Rawimage - background_image).astype(
                np.uint16)

            camera_dark_level = 100

            # # Normalize to the illumination intensity
            # Rawimage = np.uint16(Rawimage \
            # / ((background_image - camera_dark_level)\
            # /(np.amin(background_image) - camera_dark_level)))

        # Analyze each image
        # Run the detection on input image.
        MLresults = self.DetectionOnImage(Rawimage,
                                          axis=None,
                                          show_result=show_result)

        if save_mask == True:

            if not os.path.exists(os.path.join(folder, "ML_masks")):
                # If the folder is not there, create the folder
                os.mkdir(os.path.join(folder, "ML_masks"))

            fig, ax = plt.subplots()
            # Set class_names = [None,None,None,None] to mute class name display.
            visualize.display_instances(
                Rawimage,
                MLresults["rois"],
                MLresults["masks"],
                MLresults["class_ids"],
                class_names=[None, None, None, None],
                ax=ax,
                centre_coors=MLresults["Centre_coor"],
                Centre_coor_radius=2,
                WhiteSpace=(0, 0),
            )  # MLresults['class_ids'],MLresults['scores'],
            # ax.imshow(fig)
            fig.tight_layout()
            # Save the detection Rawimage
            fig_name = os.path.join(
                folder,
                "ML_masks",
                "ML_mask_{}.png".format(
                    image_file_name[0:len(image_file_name) - 4]),
            )
            plt.savefig(fname=fig_name,
                        dpi=200,
                        pad_inches=0.0,
                        bbox_inches="tight")

        if flat_cell_counted_in_folder == 0:
            # First image seeds the data frame.
            (
                cell_Data,
                flat_cell_counted_in_folder,
                total_cells_counted_in_coord,
            ) = ProcessImage.retrieveDataFromML(Rawimage, MLresults,
                                                image_file_name,
                                                flat_cell_counted_in_folder)
        else:
            (
                Cell_Data_new,
                flat_cell_counted_in_folder,
                total_cells_counted_in_coord,
            ) = ProcessImage.retrieveDataFromML(Rawimage, MLresults,
                                                image_file_name,
                                                flat_cell_counted_in_folder)

            if len(Cell_Data_new) > 0:
                cell_Data = cell_Data.append(Cell_Data_new)

        total_cells_counted_in_folder += total_cells_counted_in_coord

    if save_excel == True:
        # Save to excel
        # NOTE(review): cell_Data is unbound when fileNameList is empty —
        # confirm callers guarantee at least one image in the folder.
        cell_Data.to_excel(
            os.path.join(
                folder,
                "CellsProperties_{}flat_outof_{}cells.xlsx".format(
                    flat_cell_counted_in_folder,
                    total_cells_counted_in_folder),
            ))

    return cell_Data
def evaluate_focus(self, obj_position=None):
    """
    Evaluate the focus degree of certain objective position.

    Parameters
    ----------
    obj_position : float, optional
        The target objective position. The default is None.

    Returns
    -------
    degree_of_focus : float
        Degree of focus.
    """
    # "is not None" rather than "!= None": identity comparison is the
    # correct idiom and keeps 0.0 as a valid target position.
    if obj_position is not None:
        self.pi_device_instance.move(obj_position)

    # Get the image.
    if self.source_of_image == "PMT":
        self.galvo_image = self.galvo.run()
        plt.figure()
        plt.imshow(self.galvo_image)
        plt.show()

        if False:  # Toggle to archive the raw galvo image.
            with skimtiff.TiffWriter(os.path.join(r'M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2020-11-17 gaussian fit auto-focus cells\trial_11', str(obj_position).replace(".", "_")+ '.tif')) as tif:
                tif.save(self.galvo_image.astype('float32'), compress=0)

        degree_of_focus = ProcessImage.local_entropy(self.galvo_image.astype('float32'))

    elif self.source_of_image == "Camera":
        # First configure the AOTF.
        self.AOTF_runner = DAQmission()

        # Find the AOTF channel key.
        # NOTE(review): if no key contains 'AO', AOTF_channel_key stays
        # unbound and the calls below raise — confirm imaging_conditions
        # always carries one AO channel (like '488AO').
        for key in self.imaging_conditions:
            if 'AO' in key:
                # like '488AO'
                AOTF_channel_key = key

        # Set the AOTF first.
        self.AOTF_runner.sendSingleDigital('blankingall', True)
        self.AOTF_runner.sendSingleAnalog(AOTF_channel_key, self.imaging_conditions[AOTF_channel_key])

        # Snap an image from camera
        self.camera_image = self.HamamatsuCam_ins.SnapImage(self.imaging_conditions['exposure_time'])
        time.sleep(0.5)

        # Set back AOTF
        self.AOTF_runner.sendSingleDigital('blankingall', False)
        self.AOTF_runner.sendSingleAnalog(AOTF_channel_key, 0)

        plt.figure()
        plt.imshow(self.camera_image)
        plt.show()

        if False:  # Toggle to archive the raw camera image.
            with skimtiff.TiffWriter(os.path.join(r'M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2021-03-06 Camera AF\beads', str(obj_position).replace(".", "_")+ '.tif')) as tif:
                tif.save(self.camera_image.astype('float32'), compress=0)

        degree_of_focus = ProcessImage.variance_of_laplacian(self.camera_image.astype('float32'))

    time.sleep(0.2)

    return degree_of_focus