def __init__(self, value1, value2, value3, value4, value5, value6, value7,
             value8, value9, value10, value11, value12, value13, value14,
             findcontour_thres, contour_dilationpara, cell_region_opening,
             cell_region_closing, saving_dir):
    """Store stage-scan, DAQ and image-analysis settings on the instance.

    The positional value1..value14 arguments come straight from the GUI;
    the attribute names below document what each one means.
    """
    # Sample stage handle (Ludl controller on COM6) and scan-grid limits.
    self.ludlStage = LudlStage("COM6")
    self.UI_row_start_stagescan = value1
    self.UI_row_end_stagescan = value2
    self.UI_column_start_stagescan = value3
    self.UI_column_end_stagescan = value4
    self.UI_step_stagescan = value5
    # DAQ configuration: sampling rate plus the waveforms/channels to use.
    self.samplingrate = value6
    self.analogsignals = value7
    self.digitalsignals = value8
    self.readinchannels = value9
    # Image-analysis and cell-selection parameters.
    self.selected_num = value10
    self.smallestsize = value11
    self.opening_factor = value12
    self.closing_factor = value13
    self.binary_adaptive_block_size = value14
    self.findcontour_thres = findcontour_thres
    self.contour_dilationpara = contour_dilationpara
    self.cell_region_opening = cell_region_opening
    self.cell_region_closing = cell_region_closing
    # Directory where scan images are written.
    self.saving_dir = saving_dir
class ShowTopCellsThread(QThread): PMTimageDictMeasurement = pyqtSignal( object) #The signal for the measurement, we can connect to this signal def __init__(self, GeneralSettingDict, RankedAllCellProperties, FinalMergedCoords, IndexLookUpCellPropertiesDict, PMTimage, MatdisplayFigureTopGuys, *args, **kwargs): super().__init__(*args, **kwargs) self.GeneralSettingDict = GeneralSettingDict self.RankedAllCellProperties = RankedAllCellProperties self.CurrentPos = FinalMergedCoords self.IndexLookUpCellPropertiesDict = IndexLookUpCellPropertiesDict self.ShowTopCellImg = PMTimage self.MatdisplayFigureTopGuys = MatdisplayFigureTopGuys self.IndexLookUpCellPropertiesDictRow = self.IndexLookUpCellPropertiesDict[ 'row_{}_column_{}'.format(self.CurrentPos[0], self.CurrentPos[1])][0] self.IndexLookUpCellPropertiesDictCol = self.IndexLookUpCellPropertiesDict[ 'row_{}_column_{}'.format(self.CurrentPos[0], self.CurrentPos[1])][1] self.ludlStage = LudlStage("COM6") def run(self): self.TopCellAx = self.MatdisplayFigureTopGuys.add_subplot(111) print('-----------------------------------') #stage movement self.ludlStage.moveAbs(self.CurrentPos[0], self.CurrentPos[1]) time.sleep(1) S = ImageAnalysis( self.ShowTopCellImg, self.ShowTopCellImg ) #The same as ImageAnalysis(Data_dict_0[Pic_name], Data_dict_1[Pic_name]), call the same image with same dictionary index. v1, v2, mask_1, mask_2, thres = S.applyMask( self.GeneralSettingDict['openingfactor'], self.GeneralSettingDict['closingfactor'], self.GeneralSettingDict['binary_adaptive_block_size']) S.showlabel_with_rank_givenAx( self.GeneralSettingDict['smallestsize'], mask_1, v1, self.IndexLookUpCellPropertiesDictRow, self.IndexLookUpCellPropertiesDictCol, self.RankedAllCellProperties, 'Mean intensity in contour', self.GeneralSettingDict['selectnum'], self.TopCellAx) print('-----------------------------------')
def __init__(self, value1, value2, value3, value4, value5, value6, value7,
             value8, value9, value10, value11, value12, value13):
    """Cache the scan-grid, DAQ-rate and galvo frame settings.

    The positional value1..value13 arguments come from the GUI; the
    attribute names document their meaning.
    """
    # Serial handle to the Ludl sample stage (COM7 on this rig).
    self.ludlStage = LudlStage("COM7")
    # Scan-grid boundaries and step size (stage position indexes).
    self.UI_row_start_stagescan = value1
    self.UI_row_end_stagescan = value2
    self.UI_column_start_stagescan = value3
    self.UI_column_end_stagescan = value4
    self.UI_step_stagescan = value5
    # DAQ sampling rate shared by the analog output/input tasks.
    self.UI_Daq_sample_rate_stagescan = value6
    # Galvo voltage window and pixel counts describing one scanned frame.
    self.UI_voltXMin_stagescan = value7
    self.UI_voltXMax_stagescan = value8
    self.UI_voltYMin_stagescan = value9
    self.UI_voltYMax_stagescan = value10
    self.UI_Value_xPixels_stagescan = value11
    self.UI_Value_yPixels_stagescan = value12
    # How many frames to average per grid position.
    self.UI_Value_averagenum_stagescan = value13
def __init__(self, RoundQueueDict, RoundCoordsDict, GeneralSettingDict,
             *args, **kwargs):
    """Store the round queue, coordinates and general settings, and prepare
    an empty per-round image dictionary."""
    super().__init__(*args, **kwargs)
    self.RoundQueueDict = RoundQueueDict
    self.RoundCoordsDict = RoundCoordsDict
    self.GeneralSettingDict = GeneralSettingDict
    self.ludlStage = LudlStage("COM6")
    # One nested dict per round; the queue holds two entries per round,
    # hence the halved length. Keys are 1-based round numbers.
    self.PMTimageDict = {
        'RoundPackage_{}'.format(round_idx + 1): {}
        for round_idx in range(len(self.RoundQueueDict) // 2)
    }
    # Should be set by GUI.
    self.clock_source = 'Dev1 as clock source'
    self.scansavedirectory = self.GeneralSettingDict['savedirectory']
class StagemovementRelativeThread(QThread):
    """Worker thread that nudges the Ludl stage by a relative (x, y) offset
    and reports the position read back from the controller."""

    # Emitted with np.array([x, y]) after the move has settled.
    current_position = pyqtSignal(np.ndarray)

    def __init__(self, xRel, yRel, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.xRel = xRel
        self.yRel = yRel
        self.ludlStage = LudlStage("COM6")

    def run(self):
        """Perform the relative move, then read back and emit the position."""
        # Relative move, then give the mechanics a moment to settle.
        self.ludlStage.moveRel(self.xRel, self.yRel)
        time.sleep(1)
        # Query where the stage actually ended up and broadcast it.
        self.xPosition, self.yPosition = self.ludlStage.getPos()
        self.current_position_array = np.array([self.xPosition, self.yPosition])
        self.current_position.emit(self.current_position_array)
def __init__(self, GeneralSettingDict, RankedAllCellProperties,
             FinalMergedCoords, IndexLookUpCellPropertiesDict, PMTimage,
             MatdisplayFigureTopGuys, *args, **kwargs):
    """Store the ranked-cell context and resolve this position's index pair
    from the lookup dictionary."""
    super().__init__(*args, **kwargs)
    self.GeneralSettingDict = GeneralSettingDict
    self.RankedAllCellProperties = RankedAllCellProperties
    # (row, column) stage coordinates of the position to revisit.
    self.CurrentPos = FinalMergedCoords
    self.IndexLookUpCellPropertiesDict = IndexLookUpCellPropertiesDict
    self.ShowTopCellImg = PMTimage
    self.MatdisplayFigureTopGuys = MatdisplayFigureTopGuys
    # Resolve the [start, end] index pair for this position once.
    lookup_key = 'row_{}_column_{}'.format(self.CurrentPos[0],
                                           self.CurrentPos[1])
    index_pair = self.IndexLookUpCellPropertiesDict[lookup_key]
    self.IndexLookUpCellPropertiesDictRow = index_pair[0]
    self.IndexLookUpCellPropertiesDictCol = index_pair[1]
    self.ludlStage = LudlStage("COM6")
class StagemovementAbsoluteThread(QThread):
    """Worker thread that moves the Ludl stage to an absolute position and
    reports the position read back from the controller."""

    # Emitted with np.array([x, y]) after the move has settled.
    current_position = pyqtSignal(np.ndarray)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ludlStage = LudlStage("COM6")

    def SetTargetPos(self, xAbs, yAbs):
        """Record the absolute target coordinates; call before start()."""
        self.xAbs = xAbs
        self.yAbs = yAbs

    def run(self):
        """Perform the absolute move, then read back and emit the position."""
        # Absolute move, then give the mechanics a moment to settle.
        self.ludlStage.moveAbs(self.xAbs, self.yAbs)
        time.sleep(1)
        # Query where the stage actually ended up and broadcast it.
        self.xPosition, self.yPosition = self.ludlStage.getPos()
        self.current_position_array = np.array([self.xPosition, self.yPosition])
        self.current_position.emit(self.current_position_array)
class Stagescan():
    """Two-pass PMT stage scan driven directly through nidaqmx.

    Round 1 grabs an image at every grid position; round 2 re-images the
    same grid, runs ImageAnalysis on each (round-1, round-2) image pair,
    ranks all detected cells by 'Mean intensity in contour', then drives the
    stage back to the top-ranked positions and shows their labels.
    """

    def __init__(self, value1, value2, value3, value4, value5, value6,
                 value7, value8, value9, value10, value11, value12, value13):
        """Cache the scan-grid, DAQ-rate and galvo frame settings.

        The positional value1..value13 arguments come from the GUI; the
        attribute names document their meaning.
        """
        # Settings for stage scan (Ludl controller on COM7).
        self.ludlStage = LudlStage("COM7")
        self.UI_row_start_stagescan = value1
        self.UI_row_end_stagescan = value2
        self.UI_column_start_stagescan = value3
        self.UI_column_end_stagescan = value4
        self.UI_step_stagescan = value5
        self.UI_Daq_sample_rate_stagescan = value6
        self.UI_voltXMin_stagescan = value7
        self.UI_voltXMax_stagescan = value8
        self.UI_voltYMin_stagescan = value9
        self.UI_voltYMax_stagescan = value10
        self.UI_Value_xPixels_stagescan = value11
        self.UI_Value_yPixels_stagescan = value12
        self.UI_Value_averagenum_stagescan = value13

    def start(self):
        """Run the full two-round scan. Blocks; pauses with input() between
        phases and shows each frame with matplotlib."""
        # settings for scanning index
        position_index = []
        row_start = self.UI_row_start_stagescan      # position index start number
        row_end = self.UI_row_end_stagescan          # end number
        column_start = self.UI_column_start_stagescan
        column_end = self.UI_column_end_stagescan
        step = self.UI_step_stagescan                # length of each step, 1500 for -5~5V FOV

        # Settings for A/D output
        Daq_sample_rate = self.UI_Daq_sample_rate_stagescan

        # Scanning settings
        Value_voltXMin = self.UI_voltXMin_stagescan
        Value_voltXMax = self.UI_voltXMax_stagescan
        Value_voltYMin = self.UI_voltYMin_stagescan
        Value_voltYMax = self.UI_voltYMax_stagescan
        Value_xPixels = self.UI_Value_xPixels_stagescan
        Value_yPixels = self.UI_Value_yPixels_stagescan
        averagenum = self.UI_Value_averagenum_stagescan

        # Generate galvo samples for one raster frame.
        samples_1, samples_2 = wavegenerator.waveRecPic(
            sampleRate=Daq_sample_rate, imAngle=0,
            voltXMin=Value_voltXMin, voltXMax=Value_voltXMax,
            voltYMin=Value_voltYMin, voltYMax=Value_voltYMax,
            xPixels=Value_xPixels, yPixels=Value_yPixels, sawtooth=True)
        # Number of samples to feed to the scanner; by default it's one frame
        # repeated averagenum times.
        Totalscansamples = len(samples_1) * averagenum
        # Number of samples of each individual line of x scanning.
        ScanArrayXnum = int(len(samples_1) / Value_yPixels)
        Galvo_samples = np.vstack((samples_1, samples_2))

        # Generate digital samples: a 25000-high / 25000-low pulse train
        # repeated to cover the whole acquisition.
        # NOTE(review): Dig_samples starts as a Python list, so np.append
        # yields a float64 array, not bool -- confirm the writer accepts it.
        One_Dig_samples = np.append(np.ones(25000, dtype=bool),
                                    np.zeros(25000, dtype=bool))
        Dig_repeat_times = int(Totalscansamples / len(One_Dig_samples))
        Dig_samples = []
        for i in range(Dig_repeat_times):
            Dig_samples = np.append(Dig_samples, One_Dig_samples)
        Dataholder = np.zeros(Totalscansamples)

        with nidaqmx.Task() as slave_Task3, nidaqmx.Task() as master_Task, nidaqmx.Task() as slave_Task2:
            # AO drives the galvos, AI reads the PMT, DO gates the pulse line.
            slave_Task3.ao_channels.add_ao_voltage_chan("/Dev1/ao0:1")
            master_Task.ai_channels.add_ai_voltage_chan("/Dev1/ai0")
            slave_Task2.do_channels.add_do_chan("/Dev1/port0/line25")

            # MultiAnalogchannels: AO is slaved to the AI sample clock so all
            # three tasks stay phase-locked.
            slave_Task3.timing.cfg_samp_clk_timing(
                Daq_sample_rate, source='ai/SampleClock',
                sample_mode=AcquisitionType.FINITE,
                samps_per_chan=Totalscansamples)
            slave_Task3.triggers.sync_type.SLAVE = True

            # Analoginput: the AI task owns the sample clock (master).
            master_Task.timing.cfg_samp_clk_timing(
                Daq_sample_rate, sample_mode=AcquisitionType.FINITE,
                samps_per_chan=Totalscansamples)
            master_Task.triggers.sync_type.MASTER = True

            # Digital output, also slaved to the AI sample clock.
            slave_Task2.timing.cfg_samp_clk_timing(
                Daq_sample_rate, source='ai/SampleClock',
                sample_mode=AcquisitionType.FINITE,
                samps_per_chan=Totalscansamples)
            slave_Task2.triggers.sync_type.SLAVE = True

            AnalogWriter = nidaqmx.stream_writers.AnalogMultiChannelWriter(
                slave_Task3.out_stream, auto_start=False)
            AnalogWriter.auto_start = False
            DigitalWriter = nidaqmx.stream_writers.DigitalSingleChannelWriter(
                slave_Task2.out_stream, auto_start=False)
            DigitalWriter.auto_start = False
            reader = AnalogSingleChannelReader(master_Task.in_stream)

            time.sleep(2)

            # ---------------- Round 1: acquire reference images ----------------
            RepeatNum = 0
            Data_dict_0 = {}
            loopnum = 0
            for i in range(row_start, row_end, step):
                position_index.append(i)
                for j in range(column_start, column_end, step):
                    position_index.append(j)
                    print('-----------------------------------')
                    print(position_index)
                    # stage movement
                    self.ludlStage.moveAbs(i, j)
                    time.sleep(1)
                    AnalogWriter.write_many_sample(Galvo_samples, timeout=16.0)
                    # NOTE(review): write_one_sample_one_line is handed the whole
                    # Dig_samples array -- confirm against the nidaqmx
                    # stream_writers docs that this is the intended call.
                    DigitalWriter.write_one_sample_one_line(Dig_samples,
                                                            timeout=16.0)
                    slave_Task3.start()
                    slave_Task2.start()
                    # Blocking read of the full acquisition; starting the master
                    # AI task implicitly releases the slaved AO/DO tasks.
                    reader.read_many_sample(
                        Dataholder,
                        number_of_samples_per_channel=Totalscansamples,
                        timeout=16.0)
                    # Average the repeated frames, then fold into a 2-D image.
                    Dataholder_average = np.mean(
                        Dataholder.reshape(averagenum, -1), axis=0)
                    data1 = np.reshape(Dataholder_average,
                                       (Value_yPixels, ScanArrayXnum))
                    slave_Task3.wait_until_done()
                    slave_Task2.wait_until_done()
                    master_Task.wait_until_done()
                    # NOTE(review): str(i)+str(j) keys can collide across
                    # positions (e.g. 1,23 vs 12,3); the newer Stagescan variant
                    # inserts '_' between the parts.
                    Pic_name = str(i) + str(j)
                    print('Picture index name:' + str(RepeatNum) + '|' + str(i) + '|' + str(j))
                    # Crop to a square frame and invert the PMT signal.
                    Data_dict_0[Pic_name] = data1[:, :Value_yPixels] * -1
                    Localimg = Image.fromarray(Data_dict_0[Pic_name])  # generate an image object
                    Localimg.save(str(RepeatNum) + Pic_name + 'out_1st.tif')  # save as tif
                    plt.figure(loopnum)
                    plt.imshow(Data_dict_0[Pic_name], cmap=plt.cm.gray)
                    plt.show()
                    slave_Task3.stop()
                    master_Task.stop()
                    slave_Task2.stop()
                    time.sleep(1)
                    self.ludlStage.getPos()
                    loopnum = loopnum + 1
                # Drop the column entry so the printed index shows only rows
                # between row sweeps.
                del position_index[-1]
                print('---------------^^^^---------------')
                position_index = []
            print('Finish round 1')
            time.sleep(1)
            self.ludlStage.moveAbs(row_start, column_start)  # move to the start as preparation
            time.sleep(2)
            input("Press Enter to continue...")

            # ---------------- Round 2: re-image and analyse ----------------
            Data_dict_1 = {}  # dictionary for images
            All_cell_properties_dict = {}
            All_cell_properties = []
            cp_end_index = -1
            cp_index_dict = {}  # dictionary for each cell's properties slice
            RepeatNum = 1
            loopnum = 0
            for i in range(row_start, row_end, step):
                position_index.append(i)
                for j in range(column_start, column_end, step):
                    position_index.append(j)
                    print('----(´・ω・`)---------vvv-Start-vvv-------(´・ω・`)--------')
                    print(position_index)
                    # stage movement
                    self.ludlStage.moveAbs(i, j)
                    time.sleep(1)
                    AnalogWriter.write_many_sample(Galvo_samples, timeout=16.0)
                    DigitalWriter.write_one_sample_one_line(Dig_samples,
                                                            timeout=16.0)
                    slave_Task3.start()
                    slave_Task2.start()
                    reader.read_many_sample(
                        Dataholder,
                        number_of_samples_per_channel=Totalscansamples,
                        timeout=16.0)
                    Dataholder_average = np.mean(
                        Dataholder.reshape(averagenum, -1), axis=0)
                    data1 = np.reshape(Dataholder_average,
                                       (Value_yPixels, ScanArrayXnum))
                    slave_Task3.wait_until_done()
                    slave_Task2.wait_until_done()
                    master_Task.wait_until_done()
                    Pic_name = str(i) + str(j)
                    print('Picture index name:' + str(RepeatNum) + '|' + str(i) + '|' + str(j))
                    Data_dict_1[Pic_name] = data1[:, :Value_yPixels] * -1
                    Localimg = Image.fromarray(Data_dict_1[Pic_name])  # generate an image object
                    Localimg.save(str(RepeatNum) + Pic_name + 'out.tif')  # save as tif
                    plt.figure(loopnum)
                    plt.imshow(Data_dict_1[Pic_name], cmap=plt.cm.gray)
                    plt.show()
                    time.sleep(1)

                    # Image processing: compare this round against round 1.
                    S = ImageAnalysis(Data_dict_0[Pic_name], Data_dict_1[Pic_name])
                    v1, v2, bw, thres = S.applyMask()
                    L, cp, coutourmask, coutourimg, sing = S.get_intensity_properties(
                        100, bw, v2, thres, v2, i, j, 7)
                    S.showlabel(100, bw, v2, thres, i, j, cp)
                    print(cp)
                    All_cell_properties_dict[loopnum] = cp
                    # Accumulate all positions' cell property records in order.
                    if loopnum == 0:
                        All_cell_properties = All_cell_properties_dict[0]
                    if loopnum != 0:
                        All_cell_properties = np.append(
                            All_cell_properties,
                            All_cell_properties_dict[loopnum], axis=0)
                    cp_end_index = cp_end_index + len(cp)
                    cp_start_index = cp_end_index - len(cp) + 1
                    # Get the location of individual cp index & put in
                    # dictionary, as they are stored in sequence.
                    cp_index_dict[Pic_name] = [cp_start_index, cp_end_index]
                    time.sleep(2)
                    slave_Task3.stop()
                    master_Task.stop()
                    slave_Task2.stop()
                    time.sleep(1)
                    self.ludlStage.getPos()
                    loopnum = loopnum + 1
                del position_index[-1]
                print('-----(⊙⊙!)-----^^^^END^^^------结束-----')
                position_index = []
            print('End of round 2')
            time.sleep(2)

            # ---------------- Sorting and trace back ----------------
            # Append an 'Original_sequence' field so the pre-sort order can be
            # recovered after ranking.
            original_dtype = np.dtype(All_cell_properties.dtype.descr +
                                      [('Original_sequence', '<i4')])
            original_cp = np.zeros(All_cell_properties.shape, dtype=original_dtype)
            original_cp['Row index'] = All_cell_properties['Row index']
            original_cp['Column index'] = All_cell_properties['Column index']
            original_cp['Mean intensity'] = All_cell_properties['Mean intensity']
            original_cp['Circularity'] = All_cell_properties['Circularity']
            original_cp['Mean intensity in contour'] = All_cell_properties['Mean intensity in contour']
            original_cp['Original_sequence'] = list(range(0, len(All_cell_properties)))

            # Sort descending by contour intensity.
            sortedcp = np.flip(np.sort(original_cp, order='Mean intensity in contour'), 0)
            selected_num = 10  # determine how many we want
            # Append a 'Ranking' field recording each record's sorted position.
            rank_dtype = np.dtype(sortedcp.dtype.descr + [('Ranking', '<i4')])
            ranked_cp = np.zeros(sortedcp.shape, dtype=rank_dtype)
            ranked_cp['Row index'] = sortedcp['Row index']
            ranked_cp['Column index'] = sortedcp['Column index']
            ranked_cp['Mean intensity'] = sortedcp['Mean intensity']
            ranked_cp['Circularity'] = sortedcp['Circularity']
            ranked_cp['Mean intensity in contour'] = sortedcp['Mean intensity in contour']
            ranked_cp['Original_sequence'] = sortedcp['Original_sequence']
            ranked_cp['Ranking'] = list(range(0, len(All_cell_properties)))
            # Original acquisition order, but with the ranking attached.
            withranking_cp = np.sort(ranked_cp, order='Original_sequence')

            # All the cells are ranked; now find the desired group and their
            # position indexes, call the images and show labels of those that
            # meet the requirements, omitting bad ones.
            # Get the index of the top hits, sorted by stage position.
            cell_properties_selected_hits = ranked_cp[0:selected_num]
            cell_properties_selected_hits_index_sorted = np.sort(
                cell_properties_selected_hits, order=['Row index', 'Column index'])
            index_samples = np.vstack(
                (cell_properties_selected_hits_index_sorted['Row index'],
                 cell_properties_selected_hits_index_sorted['Column index']))
            # Deduplicate consecutive identical positions so each stage
            # position is visited once.
            merged_index_samples = index_samples[:, 0]
            # consider these after the 1st one
            for i in range(1, len(index_samples[0])):
                if index_samples[:, i][0] != index_samples[:, i - 1][0] or index_samples[:, i][1] != index_samples[:, i - 1][1]:
                    merged_index_samples = np.append(merged_index_samples,
                                                     index_samples[:, i], axis=0)
            merged_index_samples = merged_index_samples.reshape(-1, 2)  # 1st column=i, 2nd column=j

            # Then move back to each of these positions and show the labels.
            input("Press Enter to continue...")
            print(merged_index_samples)
            print(withranking_cp)
            for i in range(len(merged_index_samples)):
                print('-----------------------------------')
                # stage movement
                self.ludlStage.moveAbs(merged_index_samples[i, :].tolist()[0],
                                       merged_index_samples[i, :].tolist()[1])
                time.sleep(1)
                Pic_name_trace = str(merged_index_samples[i, :].tolist()[0]) + str(merged_index_samples[i, :].tolist()[1])
                # The same as ImageAnalysis(Data_dict_0[Pic_name],
                # Data_dict_1[Pic_name]): call the same image pair with the
                # same dictionary index.
                S = ImageAnalysis(Data_dict_0[Pic_name_trace],
                                  Data_dict_1[Pic_name_trace])
                v1, v2, bw, thres = S.applyMask()
                S.showlabel_with_rank(100, bw, v2,
                                      cp_index_dict[Pic_name_trace][0],
                                      cp_index_dict[Pic_name_trace][1],
                                      withranking_cp,
                                      'Mean intensity in contour', 10)
                print(' i: ' + str(merged_index_samples[i, :].tolist()[0]) +
                      ' j: ' + str(merged_index_samples[i, :].tolist()[1]))
                print('-----------------------------------')
                input("Press Enter to continue...")
class Stagescan():
    """Two-pass PMT stage scan where the DAQ work is delegated to
    execute_analog_readin_optional_digital_thread.

    Round 1 grabs an image at every grid position; round 2 re-images the
    grid, runs ImageAnalysis on each (round-1, round-2) pair, ranks all
    detected cells with a weighted sort, then drives the stage back to the
    top-ranked positions and shows their labels.
    """

    def __init__(self, value1, value2, value3, value4, value5, value6,
                 value7, value8, value9, value10, value11, value12, value13,
                 value14, findcontour_thres, contour_dilationpara,
                 cell_region_opening, cell_region_closing, saving_dir):
        """Cache stage-movement, DAQ and image-analysis settings.

        The positional value1..value14 arguments come from the GUI; the
        attribute names document their meaning.
        """
        # Settings for sample stage movement (Ludl controller on COM6).
        self.ludlStage = LudlStage("COM6")
        self.UI_row_start_stagescan = value1
        self.UI_row_end_stagescan = value2
        self.UI_column_start_stagescan = value3
        self.UI_column_end_stagescan = value4
        self.UI_step_stagescan = value5
        # Settings for Daq
        self.samplingrate = value6
        self.analogsignals = value7
        self.digitalsignals = value8
        self.readinchannels = value9
        # Settings for image analysis and selection
        self.selected_num = value10
        self.smallestsize = value11
        self.opening_factor = value12
        self.closing_factor = value13
        self.binary_adaptive_block_size = value14
        self.findcontour_thres = findcontour_thres
        self.contour_dilationpara = contour_dilationpara
        self.cell_region_opening = cell_region_opening
        self.cell_region_closing = cell_region_closing
        self.saving_dir = saving_dir

    def start(self):
        """Run the full two-round scan. Blocks; pauses with input() between
        phases and shows each frame with matplotlib."""
        # settings for scanning index
        position_index = []
        row_start = self.UI_row_start_stagescan      # position index start number
        row_end = self.UI_row_end_stagescan          # end number
        column_start = self.UI_column_start_stagescan
        column_end = self.UI_column_end_stagescan
        step = self.UI_step_stagescan                # length of each step, 1500 for -5~5V FOV

        # Settings for A/D output
        Daq_sample_rate = self.samplingrate

        # ---------------- Round 1: acquire reference images ----------------
        RepeatNum = 0
        Data_dict_0 = {}
        loopnum = 0
        for i in range(row_start, row_end, step):
            position_index.append(i)
            for j in range(column_start, column_end, step):
                position_index.append(j)
                print('-----------------------------------')
                print(position_index)
                # stage movement
                self.ludlStage.moveAbs(i, j)
                time.sleep(1)
                # Copy so the DAQ thread cannot mutate the stored waveform.
                self.analog_to_feed = self.analogsignals.copy()
                doit = execute_analog_readin_optional_digital_thread()
                doit.set_waves(Daq_sample_rate, self.analog_to_feed,
                               self.digitalsignals, self.readinchannels)
                doit.start()
                # Blocking read of the acquired frame.
                data1 = doit.read()
                Pic_name = str(i) + '_' + str(j)
                print('Picture index name:' + str(RepeatNum) + '|' + str(i) + '|' + str(j))
                # Assume that we are using 5v.
                # NOTE(review): the 15:515 column crop is hard-coded for this
                # voltage range/frame width -- confirm against the DAQ setup.
                Data_dict_0[Pic_name] = data1[:, 15:515]
                Localimg = Image.fromarray(Data_dict_0[Pic_name])  # generate an image object
                Localimg.save(
                    os.path.join(self.saving_dir,
                                 str(RepeatNum) + Pic_name + 'out_1st.tif'))  # save as tif
                plt.figure(loopnum)
                plt.imshow(Data_dict_0[Pic_name], cmap=plt.cm.gray)
                plt.show()
                time.sleep(0.3)
                loopnum = loopnum + 1
            # Drop the column entry so only rows carry over between sweeps.
            del position_index[-1]
            print('---------------^^^^---------------')
            position_index = []
        print('Finish round 1')
        time.sleep(1)
        self.ludlStage.moveAbs(row_start, column_start)  # move to the start as preparation
        time.sleep(2)
        input("Press Enter to continue...")

        # ---------------- Round 2: re-image and analyse ----------------
        Data_dict_1 = {}  # dictionary for images
        All_cell_properties_dict = {}
        All_cell_properties = []
        cp_end_index = -1
        cp_index_dict = {}  # dictionary for each cell's properties slice
        RepeatNum = 1
        loopnum = 0
        for i in range(row_start, row_end, step):
            position_index.append(i)
            for j in range(column_start, column_end, step):
                position_index.append(j)
                print('----(´・ω・`)---------vvv-Start-vvv-------(´・ω・`)--------')
                print(position_index)
                # stage movement
                self.ludlStage.moveAbs(i, j)
                time.sleep(1)
                self.analog_to_feed = self.analogsignals.copy()
                doit = execute_analog_readin_optional_digital_thread()
                doit.set_waves(Daq_sample_rate, self.analog_to_feed,
                               self.digitalsignals, self.readinchannels)
                doit.start()
                data1 = doit.read()
                Pic_name = str(i) + '_' + str(j)
                print('Picture index name:' + str(RepeatNum) + '|' + str(i) + '|' + str(j))
                Data_dict_1[Pic_name] = data1[:, 15:515]
                Localimg = Image.fromarray(Data_dict_1[Pic_name])  # generate an image object
                Localimg.save(
                    os.path.join(self.saving_dir,
                                 str(RepeatNum) + Pic_name + 'out.tif'))  # save as tif
                plt.figure(loopnum)
                plt.imshow(Data_dict_1[Pic_name], cmap=plt.cm.gray)
                plt.show()
                time.sleep(0.3)

                # Image processing: compare this round against round 1.
                S = ImageAnalysis(Data_dict_0[Pic_name], Data_dict_1[Pic_name])
                # v1 = thresholded whole image.
                v1, v2, mask_1, mask_2, thres = S.applyMask(
                    self.opening_factor, self.closing_factor,
                    self.binary_adaptive_block_size)
                cp, coutourmask, coutourimg, intensityimage_intensity, contour_change_ratio = S.get_intensity_properties(
                    self.smallestsize, mask_1, thres, v1, v2, i, j,
                    self.findcontour_thres, self.contour_dilationpara,
                    self.cell_region_opening, self.cell_region_closing)
                S.showlabel(self.smallestsize, mask_1, v1, thres, i, j, cp)
                print(cp)
                All_cell_properties_dict[loopnum] = cp
                # Accumulate all positions' cell property records in order.
                if loopnum == 0:
                    All_cell_properties = All_cell_properties_dict[0]
                if loopnum != 0:
                    All_cell_properties = np.append(
                        All_cell_properties,
                        All_cell_properties_dict[loopnum], axis=0)
                cp_end_index = cp_end_index + len(cp)
                cp_start_index = cp_end_index - len(cp) + 1
                # Get the location of individual cp index & put in dictionary,
                # as they are stored in sequence.
                cp_index_dict[Pic_name] = [cp_start_index, cp_end_index]
                time.sleep(0.3)
                loopnum = loopnum + 1
            del position_index[-1]
            print('-----(⊙⊙!)-----^^^^END^^^------结束-----')
            position_index = []
        print('End of round 2')
        time.sleep(2)

        # ---------------- Sorting and trace back ----------------
        # Append an 'Original_sequence' field so the pre-sort order can be
        # recovered after ranking (numpy.lib.recfunctions as rfn).
        original_cp = rfn.append_fields(All_cell_properties,
                                        'Original_sequence',
                                        list(range(0, len(All_cell_properties))),
                                        usemask=False)
        # Weighted sort over contour change and contour intensity (50/50).
        sortedcp = S.sort_using_weight(original_cp, 'Change',
                                       'Mean intensity in contour', 0.5, 0.5)
        # Append a 'Ranking' field recording each record's sorted position.
        ranked_cp = rfn.append_fields(sortedcp, 'Ranking',
                                      list(range(0, len(All_cell_properties))),
                                      usemask=False)
        # Original acquisition order, but with the ranking attached.
        withranking_cp = np.sort(ranked_cp, order='Original_sequence')

        # All the cells are ranked; now find the desired group and their
        # position indexes, call the images and show labels of those that
        # meet the requirements, omitting bad ones.
        # Get the index of the top hits, sorted by stage position.
        cell_properties_selected_hits = ranked_cp[0:self.selected_num]
        cell_properties_selected_hits_index_sorted = np.sort(
            cell_properties_selected_hits, order=['Row index', 'Column index'])
        index_samples = np.vstack(
            (cell_properties_selected_hits_index_sorted['Row index'],
             cell_properties_selected_hits_index_sorted['Column index']))
        # Deduplicate consecutive identical positions so each stage position
        # is visited once.
        merged_index_samples = index_samples[:, 0]
        # consider these after the 1st one
        for i in range(1, len(index_samples[0])):
            if index_samples[:, i][0] != index_samples[:, i - 1][0] or index_samples[:, i][1] != index_samples[:, i - 1][1]:
                merged_index_samples = np.append(merged_index_samples,
                                                 index_samples[:, i], axis=0)
        merged_index_samples = merged_index_samples.reshape(-1, 2)  # 1st column=i, 2nd column=j

        # Then move back to each of these positions and show the labels.
        input("Press Enter to continue...")
        print(merged_index_samples)
        print(withranking_cp)
        for i in range(len(merged_index_samples)):
            print('-----------------------------------')
            # stage movement
            self.ludlStage.moveAbs(merged_index_samples[i, :].tolist()[0],
                                   merged_index_samples[i, :].tolist()[1])
            time.sleep(1)
            Pic_name_trace = str(merged_index_samples[i, :].tolist()[0]) + '_' + str(merged_index_samples[i, :].tolist()[1])
            # The same as ImageAnalysis(Data_dict_0[Pic_name],
            # Data_dict_1[Pic_name]): call the same image pair with the same
            # dictionary index.
            S = ImageAnalysis(Data_dict_0[Pic_name_trace],
                              Data_dict_1[Pic_name_trace])
            v1, v2, mask_1, mask_2, thres = S.applyMask(
                self.opening_factor, self.closing_factor,
                self.binary_adaptive_block_size)
            S.showlabel_with_rank(self.smallestsize, mask_1, v1,
                                  cp_index_dict[Pic_name_trace][0],
                                  cp_index_dict[Pic_name_trace][1],
                                  withranking_cp,
                                  'Mean intensity in contour',
                                  self.selected_num)
            print(' i: ' + str(merged_index_samples[i, :].tolist()[0]) +
                  ' j: ' + str(merged_index_samples[i, :].tolist()[1]))
            print('-----------------------------------')
            input("Press Enter to continue...")
# -*- coding: utf-8 -*- """ Created on Mon Feb 18 11:42:50 2019 @author: lhuismans """ import time from stage import LudlStage #import visa #First initialize the stage, the correct COM-port has to be specified. I think you can find the COM# under device manager. ludlStage = LudlStage("COM6") #ludlStage.send_end='True' #ludlStage.delay = 0.2 #ludlStage.baud_rate=9600 #ludlStage.read_termination = '\r' #ludlStage.write_termination='\r' #Now the stage is initialized functions can be past to it. In the stage.py file each function is explained and it is specified what parameters it takes. #ludlStage.Joystick(True) #ludlStage.timeout = 0.1 #ludlStage.delay = 0.1 i= 0 j= 0 ludlStage.moveAbs(i,j) #ludlStage.moveRel(i,j) #time.sleep(1) ii, jj =ludlStage.getPos() #j increase = fov in labview shifts down
class ScanningExecutionThread(QThread):
    """Worker thread that executes one full stage-scanning session.

    For every scanning round it moves the Ludl sample stage through the
    configured coordinates, optionally applies a focus correction, sweeps
    a Z stack with the objective motor, and executes the queued DAQ
    waveform packages.  Reconstructed PMT images are cached in
    ``self.PMTimageDict``; for the round configured as the after-KCl
    round a cell-ranking analysis is run and the results are emitted
    through the ``ScanningResult`` signal.
    """

    # Emitted at the end of run(): (ranked cell properties, merged unique
    # coordinates, cell-properties lookup dict, PMT image dict).
    ScanningResult = pyqtSignal(np.ndarray, np.ndarray, object, object)

    def __init__(self, RoundQueueDict, RoundCoordsDict, GeneralSettingDict,
                 *args, **kwargs):
        """Store the scan configuration and connect to the sample stage.

        RoundQueueDict     -- waveform + galvo-info packages, two entries
                              per round ('RoundPackage_n', 'GalvoInforPackage_n').
        RoundCoordsDict    -- flat per-round coordinate list; each position
                              occupies two slots (row, column).
        GeneralSettingDict -- GUI settings (save directory, round numbers,
                              image-analysis parameters, ...).
        """
        super().__init__(*args, **kwargs)
        self.RoundQueueDict = RoundQueueDict
        self.RoundCoordsDict = RoundCoordsDict
        self.GeneralSettingDict = GeneralSettingDict
        self.ludlStage = LudlStage("COM6")  # Sample stage on its fixed serial port.

        # One nested image dictionary per round; RoundQueueDict holds two
        # entries (waveform + galvo info) per round, hence the division by 2.
        self.PMTimageDict = {}
        for i in range(int(len(self.RoundQueueDict) / 2)):
            self.PMTimageDict['RoundPackage_{}'.format(i + 1)] = {}

        self.clock_source = 'Dev1 as clock source'  # Should be set by GUI.
        self.scansavedirectory = self.GeneralSettingDict['savedirectory']

    def run(self):
        """Execute all rounds sequentially (runs in the worker thread)."""
        print('----------------------Starting to connect the Objective motor-------------------------')
        self.pi_device_instance = PIMotor()
        print('Objective motor connected.')
        self.ObjCurrentPos = self.pi_device_instance.pidevice.qPOS(
            self.pi_device_instance.pidevice.axes)

        # EachRound counts from 0; the dictionary keys count from 1.
        for EachRound in range(int(len(self.RoundQueueDict) / 2)):
            print('----------------------------------------------------------------------------')
            print('Below is Round {}.'.format(EachRound + 1))

            # ---------------- Unpack the settings for each round ----------------
            CoordOrder = 0  # n-th coordinate; used when appending cell-properties arrays.
            CellPropertiesDict = {}
            cp_end_index = -1
            # Maps 'row_i_column_j' -> [start, end] indices into the
            # concatenated cell-properties array.
            self.IndexLookUpCellPropertiesDict = {}

            # The focus-stack setting is an encoded string containing
            # 'Focus<N>' and 'Being<step>'; pull out plane count and step.
            ZStackinfor = self.GeneralSettingDict['FocusStackInfoDict'][
                'RoundPackage_{}'.format(EachRound + 1)]
            self.ZStackNum = int(ZStackinfor[ZStackinfor.index('Focus') + 5])
            self.ZStackStep = float(
                ZStackinfor[ZStackinfor.index('Being') + 5:len(ZStackinfor)])

            # Each position occupies two slots (row, column) in the flat list.
            CoordsNum = int(len(self.RoundCoordsDict[
                'CoordsPackage_{}'.format(EachRound + 1)]) / 2)

            for EachCoord in range(CoordsNum):
                print('Round {}. Current index: {}.'.format(
                    EachRound + 1,
                    self.RoundCoordsDict['CoordsPackage_{}'.format(
                        EachRound + 1)][EachCoord * 2:EachCoord * 2 + 2]))

                # ---------------- Stage movement ----------------
                RowIndex = int(self.RoundCoordsDict['CoordsPackage_{}'.format(
                    EachRound + 1)][EachCoord * 2:EachCoord * 2 + 2][0])
                ColumnIndex = int(self.RoundCoordsDict['CoordsPackage_{}'.format(
                    EachRound + 1)][EachCoord * 2:EachCoord * 2 + 2][1])
                self.ludlStage.moveAbs(RowIndex, ColumnIndex)
                time.sleep(1)  # Give the stage time to settle.

                # ---------------- Adjust focus position ----------------
                if len(self.GeneralSettingDict['FocusCorrectionMatrixDict']) > 0:
                    FocusPosArray = self.GeneralSettingDict[
                        'FocusCorrectionMatrixDict']['RoundPackage_{}'.format(
                            EachRound + 1)]
                    FocusPos = FocusPosArray[EachCoord]
                    # BUG FIX: the original format string had no '{}'
                    # placeholder, so the target position was never printed.
                    print('Target focus pos: {}'.format(FocusPos))
                    pos = PIMotor.move(self.pi_device_instance.pidevice, FocusPos)
                    self.ObjCurrentPos = self.pi_device_instance.pidevice.qPOS(
                        self.pi_device_instance.pidevice.axes)
                    print("Current position: {:.4f}".format(
                        self.ObjCurrentPos['1']))
                    time.sleep(0.5)

                # ------- Get the Z-stack objective positions ready -------
                # Centre the stack on the current objective position.
                ZStacklinspaceStart = self.ObjCurrentPos['1'] - math.floor(
                    self.ZStackNum / 2) * self.ZStackStep
                ZStacklinspaceEnd = self.ObjCurrentPos['1'] + (
                    self.ZStackNum - math.floor(self.ZStackNum / 2) - 1) * self.ZStackStep
                ZStackPosList = np.linspace(ZStacklinspaceStart,
                                            ZStacklinspaceEnd,
                                            num=self.ZStackNum)

                # ---------------- Execute waveform packages ----------------
                self.WaveforpackageNum = int(len(self.RoundQueueDict[
                    'RoundPackage_{}'.format(EachRound + 1)]))
                print('*******************************************Round {}. Current index: {}.**************************************************'.format(
                    EachRound + 1,
                    self.RoundCoordsDict['CoordsPackage_{}'.format(
                        EachRound + 1)][EachCoord * 2:EachCoord * 2 + 2]))

                for EachZStackPos in range(self.ZStackNum):
                    # Move to the next Z-stack focus plane.
                    if self.ZStackNum > 1:
                        self.ZStackOrder = EachZStackPos + 1  # Stack order starts at 1, not 0.
                        FocusPos = ZStackPosList[EachZStackPos]
                        print('Target focus pos: {}'.format(FocusPos))
                        pos = PIMotor.move(self.pi_device_instance.pidevice,
                                           FocusPos)
                        self.ObjCurrentPosInStack = self.pi_device_instance.pidevice.qPOS(
                            self.pi_device_instance.pidevice.axes)
                        print("Current position: {:.4f}".format(
                            self.ObjCurrentPosInStack['1']))
                        time.sleep(0.3)
                    else:
                        self.ZStackOrder = 1

                    # Execute each individual waveform package.
                    for EachWaveform in range(self.WaveforpackageNum):
                        WaveformPackageToBeExecute = self.RoundQueueDict[
                            'RoundPackage_{}'.format(EachRound + 1)][
                                'WaveformPackage_{}'.format(EachWaveform + 1)]
                        WaveformPackageGalvoInfor = self.RoundQueueDict[
                            'GalvoInforPackage_{}'.format(EachRound + 1)][
                                'GalvoInfor_{}'.format(EachWaveform + 1)]
                        self.readinchan = WaveformPackageToBeExecute[3]
                        # [current round number, current waveform package number]
                        self.RoundWaveformIndex = [EachRound + 1, EachWaveform + 1]
                        self.CurrentPosIndex = [RowIndex, ColumnIndex]
                        if WaveformPackageGalvoInfor != 'NoGalvo':
                            # Unpack the galvo-scanning information.
                            self.readinchan = WaveformPackageGalvoInfor[0]
                            self.repeatnum = WaveformPackageGalvoInfor[1]
                            self.PMT_data_index_array = WaveformPackageGalvoInfor[2]
                            self.averagenum = WaveformPackageGalvoInfor[3]
                            self.lenSample_1 = WaveformPackageGalvoInfor[4]
                            self.ScanArrayXnum = WaveformPackageGalvoInfor[5]

                        if self.clock_source == 'Dev1 as clock source':
                            self.adcollector = execute_analog_readin_optional_digital_thread()
                            # [0] = sampling rate, [1] = analogcontainer_array,
                            # [2] = digitalcontainer_array, [3] = readinchan
                            self.adcollector.set_waves(
                                WaveformPackageToBeExecute[0],
                                WaveformPackageToBeExecute[1],
                                WaveformPackageToBeExecute[2],
                                WaveformPackageToBeExecute[3])
                            self.adcollector.collected_data.connect(self.ProcessData)
                            self.adcollector.run()
                            #self.ai_dev_scaling_coeff = self.adcollector.get_ai_dev_scaling_coeff()
                        elif self.clock_source == 'Cam as clock source':
                            self.adcollector = execute_analog_and_readin_digital_optional_camtrig_thread()
                            self.adcollector.set_waves(
                                WaveformPackageToBeExecute[0],
                                WaveformPackageToBeExecute[1],
                                WaveformPackageToBeExecute[2],
                                WaveformPackageToBeExecute[3])
                            self.adcollector.collected_data.connect(self.ProcessData)
                            self.adcollector.run()
                        time.sleep(0.5)  # Wait for receiving data to be done.
                    time.sleep(0.3)
                print('*************************************************************************************************************************')

                # ---------------- Image analysis part ----------------
                if EachRound + 1 == self.GeneralSettingDict['AftRoundNum']:
                    # This is the after-KCl-assay acquisition round.  Make
                    # sure self.ProcessData has finished before analysing.
                    time.sleep(1)
                    print('Image analysis start.')
                    # Pull the before/after images from the cache.
                    ImageBef = self.PMTimageDict['RoundPackage_{}'.format(
                        self.GeneralSettingDict['BefRoundNum'])][
                            'row_{}_column_{}_stack1'.format(RowIndex, ColumnIndex)]
                    ImageAft = self.PMTimageDict['RoundPackage_{}'.format(
                        self.GeneralSettingDict['AftRoundNum'])][
                            'row_{}_column_{}_stack1'.format(RowIndex, ColumnIndex)]
                    print(ImageAft.shape)  # NOT ready for 3d stack
                    try:
                        self.ImageAnalysisInstance = ImageAnalysis(ImageBef, ImageAft)
                        MaskedImageBef, MaskedImageAft, MaskBef, MaskAft, thres = self.ImageAnalysisInstance.applyMask(
                            self.GeneralSettingDict['openingfactor'],
                            self.GeneralSettingDict['closingfactor'],
                            self.GeneralSettingDict['binary_adaptive_block_size'])  #v1 = Thresholded whole image
                        CellPropertiesArray, coutourmask, coutourimg, intensityimage_intensity, contour_change_ratio = self.ImageAnalysisInstance.get_intensity_properties(
                            self.GeneralSettingDict['smallestsize'], MaskBef,
                            thres, MaskedImageBef, MaskedImageAft, RowIndex,
                            ColumnIndex,
                            self.GeneralSettingDict['self_findcontour_thres'],
                            self.GeneralSettingDict['contour_dilation'],
                            self.GeneralSettingDict['cellopeningfactor'],
                            self.GeneralSettingDict['cellclosingfactor'])
                        self.ImageAnalysisInstance.showlabel(
                            self.GeneralSettingDict['smallestsize'], MaskBef,
                            MaskedImageBef, thres, RowIndex, ColumnIndex,
                            CellPropertiesArray)
                        print(CellPropertiesArray)

                        # Append this position's cell properties to the
                        # running array of all positions.
                        CellPropertiesDict[CoordOrder] = CellPropertiesArray
                        if CoordOrder == 0:
                            self.AllCellPropertiesDict = CellPropertiesDict[0]
                        if CoordOrder != 0:
                            self.AllCellPropertiesDict = np.append(
                                self.AllCellPropertiesDict,
                                CellPropertiesDict[CoordOrder], axis=0)
                        # Cell properties are stored in sequence; record
                        # which slice belongs to this stage coordinate.
                        cp_end_index = cp_end_index + len(CellPropertiesArray)
                        cp_start_index = cp_end_index - len(CellPropertiesArray) + 1
                        self.IndexLookUpCellPropertiesDict[
                            'row_{}_column_{}'.format(RowIndex, ColumnIndex)] = [
                                cp_start_index, cp_end_index]
                    except Exception:  # Was a bare except; keep best-effort behavior.
                        print('Image analysis failed.')
                    time.sleep(0.3)
                CoordOrder = CoordOrder + 1

            # ---------------- Sort the cell properties array ----------------
            if EachRound + 1 == self.GeneralSettingDict['AftRoundNum']:
                self.RankedAllCellProperties, self.FinalMergedCoords = self.SortingPropertiesArray(
                    self.AllCellPropertiesDict)

        try:
            self.ScanningResult.emit(self.RankedAllCellProperties,
                                     self.FinalMergedCoords,
                                     self.IndexLookUpCellPropertiesDict,
                                     self.PMTimageDict)
        except Exception:
            # The ranking attributes only exist when an after-KCl round ran.
            print('Failed to generate cell properties ranking.')
        try:
            PIMotor.CloseMotorConnection(self.pi_device_instance.pidevice)
            print('Objective motor disconnected.')
        except Exception:
            pass  # Best-effort disconnect.

    #---------------Reconstruct and save images from 1D recorded array.---------------
    def _reconstruct_and_save_pmt_images(self, trim_last_sample):
        """Rebuild 2-D PMT frames from self.data_collected_0, save each as
        .tif, display it, and stack the frames in
        self.PMT_image_reconstructed_stack.

        trim_last_sample -- drop the extra trailing sample of the last
                            repeat (needed in the 2-channel acquisition mode).

        Shared helper for the 1- and 2-channel paths of ProcessData, which
        previously duplicated this whole loop.
        """
        for imageSequence in range(self.repeatnum):
            try:
                self.PMT_image_reconstructed_array = self.data_collected_0[
                    np.where(self.PMT_data_index_array == imageSequence + 1)]
                if trim_last_sample and imageSequence == int(self.repeatnum) - 1:
                    # Extra one sample at the end.
                    self.PMT_image_reconstructed_array = self.PMT_image_reconstructed_array[
                        0:len(self.PMT_image_reconstructed_array) - 1]

                # Average over the configured number of repeats per line.
                Dataholder_average = np.mean(
                    self.PMT_image_reconstructed_array.reshape(
                        self.averagenum, -1), axis=0)
                Value_yPixels = int(self.lenSample_1 / self.ScanArrayXnum)
                self.PMT_image_reconstructed = np.reshape(
                    Dataholder_average, (Value_yPixels, self.ScanArrayXnum))
                # Crop size based on: M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2019-12-30 2p beads area test 4um
                self.PMT_image_reconstructed = self.PMT_image_reconstructed[:, 50:550]

                # Stack the arrays into a 3d array.
                if imageSequence == 0:
                    self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed
                else:
                    self.PMT_image_reconstructed_stack = np.concatenate(
                        (self.PMT_image_reconstructed_stack,
                         self.PMT_image_reconstructed), axis=0)

                Localimg = Image.fromarray(
                    self.PMT_image_reconstructed)  #generate an image object
                Localimg.save(
                    os.path.join(
                        self.scansavedirectory,
                        'Round' + str(self.RoundWaveformIndex[0]) + 'R' +
                        str(self.CurrentPosIndex[0]) + 'C' +
                        str(self.CurrentPosIndex[1]) + '_PMT_' +
                        str(imageSequence) + 'Zpos' + str(self.ZStackOrder) +
                        '.tif'))  #save as tif

                plt.figure()
                plt.imshow(self.PMT_image_reconstructed, cmap=plt.cm.gray)
                plt.show()
            except Exception:  # Was a bare except; keep best-effort per-frame behavior.
                print('No.{} image failed to generate.'.format(imageSequence))

    def ProcessData(self, data_waveformreceived):
        """Slot for the DAQ collector's collected_data signal.

        data_waveformreceived -- list of 1-D channel arrays.  Saves the raw
        trace, reconstructs the PMT frames, and stores the stacked result
        in self.PMTimageDict under the current round/position/Z-order key.
        """
        self.adcollector.save_as_binary(self.scansavedirectory)
        self.channel_number = len(data_waveformreceived)
        if self.channel_number == 1:
            if 'Vp' in self.readinchan:
                pass  # Patch-voltage channel: not handled here.
            elif 'Ip' in self.readinchan:
                pass  # Patch-current channel: not handled here.
            elif 'PMT' in self.readinchan:
                # The PMT signal is inverted; drop the extra trailing sample.
                self.data_collected_0 = data_waveformreceived[0] * -1
                self.data_collected_0 = self.data_collected_0[
                    0:len(self.data_collected_0) - 1]
                print(len(self.data_collected_0))
                self._reconstruct_and_save_pmt_images(trim_last_sample=False)
        elif self.channel_number == 2:
            if 'PMT' not in self.readinchan:
                pass
            elif 'PMT' in self.readinchan:
                self.data_collected_0 = data_waveformreceived[0] * -1
                self.data_collected_0 = self.data_collected_0[
                    0:len(self.data_collected_0) - 1]
                print(len(self.data_collected_0))
                # In this mode the last repeat carries one extra sample.
                self._reconstruct_and_save_pmt_images(trim_last_sample=True)

        self.PMTimageDict['RoundPackage_{}'.format(self.RoundWaveformIndex[0])][
            'row_{}_column_{}_stack{}'.format(
                self.CurrentPosIndex[0], self.CurrentPosIndex[1],
                self.ZStackOrder)] = self.PMT_image_reconstructed_stack
        print('ProcessData executed.')

    #---------------------------Sorting the cells---------------------------
    def SortingPropertiesArray(self, All_cell_properties):
        """Rank cells by weighted properties and merge duplicate coordinates.

        All_cell_properties -- structured array with at least the fields
        'Mean intensity in contour', 'Contour soma ratio', 'Change',
        'Row index' and 'Column index'.

        Returns (withranking_cp, merged_index_samples): the input array in
        its original order with a 'Ranking' field appended, and an (n, 2)
        array of unique (row, column) stage coordinates of the top
        'selectnum' cells.
        """
        # Remember the original order so the ranking can be mapped back.
        original_cp = rfn.append_fields(All_cell_properties,
                                        'Original_sequence',
                                        list(range(0, len(All_cell_properties))),
                                        usemask=False)
        sortedcp = self.ImageAnalysisInstance.sort_using_weight(
            original_cp, 'Mean intensity in contour', 'Contour soma ratio',
            'Change',
            self.GeneralSettingDict['Mean intensity in contour weight'],
            self.GeneralSettingDict['Contour soma ratio weight'],
            self.GeneralSettingDict['Change weight'])
        # Add the ranking, then restore the original sequence.
        ranked_cp = rfn.append_fields(sortedcp, 'Ranking',
                                      list(range(0, len(All_cell_properties))),
                                      usemask=False)
        withranking_cp = np.sort(ranked_cp, order='Original_sequence')

        # Take the top hits and collect their stage coordinates.
        cell_properties_selected_hits = ranked_cp[
            0:self.GeneralSettingDict['selectnum']]
        cell_properties_selected_hits_index_sorted = np.sort(
            cell_properties_selected_hits, order=['Row index', 'Column index'])
        index_samples = np.vstack(
            (cell_properties_selected_hits_index_sorted['Row index'],
             cell_properties_selected_hits_index_sorted['Column index']))

        # Merge coordinates which are the same: the list is sorted, so equal
        # coordinates are adjacent; keep the first, append on every change.
        merged_index_samples = index_samples[:, 0]
        for i in range(1, len(index_samples[0])):
            if index_samples[:, i][0] != index_samples[:, i - 1][0] or \
               index_samples[:, i][1] != index_samples[:, i - 1][1]:
                merged_index_samples = np.append(merged_index_samples,
                                                 index_samples[:, i], axis=0)
        merged_index_samples = merged_index_samples.reshape(
            -1, 2)  # 1st column = row index, 2nd column = column index.

        return withranking_cp, merged_index_samples
def __init__(self, *args, **kwargs):
    """Forward construction to the Qt base class and open the sample stage.

    The Ludl stage is always reached through the same serial port.
    """
    super().__init__(*args, **kwargs)
    # NOTE(review): assumes the stage sits on COM6 — confirm in device manager.
    stage_port = "COM6"
    self.ludlStage = LudlStage(stage_port)
def __init__(self, xRel, yRel, *args, **kwargs):
    """Store a relative move target and connect to the Ludl sample stage.

    xRel, yRel -- displacement kept for a later relative stage move
    (stage units — presumably motor steps; confirm against stage.py).
    """
    super().__init__(*args, **kwargs)
    self.xRel, self.yRel = xRel, yRel  # Target displacement.
    self.ludlStage = LudlStage("COM6")  # Fixed serial port for the stage.