def issue_command(self, command, suppress_msg=False):
    """Send one carriage-return-terminated command to the stage.

    Set ``suppress_msg`` to skip the log entry for chatty callers.
    """
    payload = '{}\r'.format(command)
    if not suppress_msg:
        comment('sending command to stage:{}'.format(payload))
    self.ser.write(payload.encode('utf-8'))
def get_long_response(self):
    """Accumulate bytes from the stage serial port until the reply
    contains 'END', then log and return the full text."""
    buffered = ''
    while 'END' not in buffered:
        chunk = self.ser.read()
        if chunk != b'':
            buffered += chunk.decode('utf-8')
    comment('response received from stage:{}'.format(buffered))
    return buffered
def get_response(self):
    """Accumulate bytes from the attenuator serial port until a
    carriage return arrives, then log and return the full text."""
    buffered = ''
    while '\r' not in buffered:
        chunk = self.ser.read()
        if chunk != b'':
            buffered += chunk.decode('utf-8')
    comment('response received from attenuator:{}'.format(buffered))
    return buffered
def calculateCriteriaWeights(preferenceMatrix, criteriaArray):
    """Compute AHP criteria weights from a pairwise preference matrix.

    Each column is normalised by its sum, then each row is averaged to
    give that criterion's weight.

    Side effect: stores each weight on the matching Criteria object in
    ``criteriaArray``. Returns the weight vector (numpy array).
    """
    comment("Calculating criteria weights...")
    matrixSize = len(preferenceMatrix)
    verticalSum = np.sum(preferenceMatrix, axis=0)
    criteriaWeights = np.sum(preferenceMatrix / verticalSum, axis=1) / matrixSize
    # pair criteria with their weights directly instead of the original
    # index loop over `_`
    for criterion, weight in zip(criteriaArray, criteriaWeights):
        criterion.weight = weight
    return criteriaWeights
def computeConsistency(preferenceMatrix, criteriaWeights):
    """Return (consistency index, consistency ratio) for an AHP matrix.

    The ratio divides the index by the random-index coefficient for
    the matrix size (module-level RATIO_COEF table).
    """
    comment(f"Calculating consistency index...")
    n = len(preferenceMatrix)
    weighted = preferenceMatrix * criteriaWeights
    rowTotals = np.sum(weighted, axis=1)
    # lambda-max: mean of (weighted row sum / weight)
    lambdaMax = sum(rowTotals / criteriaWeights) / n
    ci = (lambdaMax - n) / (n - 1)
    cr = ci / RATIO_COEF[n]
    return (ci, cr)
def click_move_slot(self, click_x, click_y):
    """Translate a mouse click in the video widget into a relative
    stage move that brings the clicked point to the reticle."""
    # reticle position scaled from native camera pixels (1024x822)
    # down to the displayed widget size (851x681)
    reticle_px = np.array([self.reticle_x * 851 / 1024,
                           self.reticle_y * 681 / 822])
    click_px = np.array([click_x, click_y])
    delta_px = click_px - reticle_px
    step_vector = self.scale_move_vector(delta_px)
    step_vector = self.remove_calibrated_error(step_vector[0],
                                               step_vector[1])
    comment('click move vector: {}'.format(step_vector))
    return self.move_relative(step_vector)
def localize2(self):
    """Scan a well in a spiral, stitching a tile image at every frame,
    then write the stitched well image to disk.

    Bug fix: each (num, direction) pair yielded by
    ``get_spiral_directions`` means "move ``num`` frames in that
    direction" (see ``localize``, which loops ``range(num)``). The
    original moved only once per pair, so most of the spiral was
    skipped and the stitched image was incomplete.
    """
    box_size = 5
    # remember the starting (well-center) stage position
    self.well_center = self.get_stage_position()
    stitcher = wellStitcher(box_size, self.image)
    directions = self.get_spiral_directions(box_size)
    for num, let in directions:
        for _ in range(num):
            self.delay()
            self.move_frame(let)
            self.delay()
            # keep the GUI responsive and let a fresh frame arrive
            QApplication.processEvents()
            stitcher.add_img(let, self.image)
    comment('writing well tile file...')
    stitcher.write_well_img()
    comment('tiling completed!')
def compensate_for_objective_offsets(self, present_mag, future_mag):
    """Move the stage to cancel the lateral offset between two
    objectives, then record the new magnification.

    Offsets are calibrated relative to the 4x objective.
    """
    offsets = {
        4: np.zeros(2),
        20: np.array([-67, -115]),
        40: np.array([-78, -124]),
        60: np.array([-74, -132]),
        100: np.array([-79, -139]),
    }
    # undo the current objective's offset and apply the new one in a
    # single relative move (b - a == -a + b)
    correction = offsets[future_mag] - offsets[present_mag]
    comment('objective offset correction: {}'.format(correction))
    self.move_relative(correction)
    self.magnification = future_mag
def get_experiment_variables(self):
    """Prompt the user for experiment metadata and log each answer.

    An answer is accepted once it contains at least one vowel or one
    digit (a cheap guard against empty or junk entries); otherwise the
    same prompt repeats.
    """
    var_dict = {
        'stain(s) used:': 'Enter the stain(s) used',
        'cell line:': 'Enter the cell line',
        'fixative used:': 'Enter the fixative used'
    }
    # acceptance tokens: any vowel or any decimal digit
    checks = ['a', 'e', 'i', 'o', 'u'] + [str(num) for num in range(10)]
    for key, prompt in var_dict.items():
        good_entry = False
        while not good_entry:  # was `good_entry != True`
            # use the iterated value directly (was re-looked-up as
            # var_dict[key], leaving `value` unused)
            user_input = self.get_text(prompt)
            val = user_input.lower()
            if any(token in val for token in checks):
                comment('{} {}'.format(key, user_input))
                good_entry = True
def startVideo(self):
    """Frame-grab loop: pull images from the OpenCV camera, optionally
    contrast-enhance them, and emit them as QImages until
    self.run_video is cleared; the camera is released on exit."""
    # camera_port = 1 #*2)
    # self.camera.set(15,52.131)
    comment('video properties:')
    # dump the first 19 OpenCV capture properties for the startup log
    for i in range(19):
        comment('property {}, value: {}'.format(i, self.camera.get(i)))
    while self.run_video:
        ret, image = self.camera.read()
        # image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
        # hand an unmodified copy to the screenshot/processing pipeline
        self.vid_process_signal.emit(image.copy())
        # print(cv2.Laplacian(image, cv2.CV_64F).var())
        self.draw_reticle(image)
        if self.noise_removal == True:
            # print('denoising...')
            # self.camera.set(3,1024)
            # self.camera.set(4,822)
            # image = cv2.fastNlMeansDenoisingColored(image,None,3,7,7)
            # CLAHE contrast equalisation applied to the L channel in
            # LAB space, then converted back to BGR
            lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
            l, a, b = cv2.split(lab)
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
            cl = clahe.apply(l)
            limg = cv2.merge((cl, a, b))
            image = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
            # print('done denoising')
        # Qt expects RGB channel ordering
        color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        height, width, _ = color_swapped_image.shape
        qt_image = QtGui.QImage(color_swapped_image.data, width, height,
                                color_swapped_image.strides[0],
                                QtGui.QImage.Format_RGB888)
        qt_image = qt_image.scaled(self.window_size)
        self.VideoSignal.emit(qt_image)
    self.camera.release()
    comment('ending video')
def __init__(self, parent=None):
    """Set up the Phidgets stepper channel used for autofocus, log its
    configuration, wire change handlers, and load the focus CNN."""
    super(autofocuser, self).__init__(parent)
    self.ch = Stepper()
    # block up to 5000 ms for the stepper hardware to attach
    self.ch.openWaitForAttachment(5000)
    self.ch.setEngaged(True)
    self.full_scale = 27300
    self.image_count = 0
    self.track_position = False
    self.pool = ThreadPool(processes=3)
    self.velocity = 0
    # interval (ms) between data events from the stepper
    self.ch.setDataInterval(100)
    self.position = 0
    self.focused_position = 0
    # getter callables polled once below purely for the startup log
    self.status_dict = {
        'current limit': self.ch.getCurrentLimit,
        'control mode': self.ch.getControlMode,
        # 'setting min position: ': self.ch.setMinPosition(0),
        'min position': self.ch.getMinPosition,
        # 'setting max position: ': self.ch.setMaxPosition(self.full_scale),
        'max position': self.ch.getMaxPosition,
        'rescale factor': self.ch.getRescaleFactor,
        'target position': self.ch.getTargetPosition,
        'acceleration': self.ch.getAcceleration,
        'engaged?': self.ch.getEngaged,
        'max velocity:': self.ch.getMaxVelocityLimit,
        'data interval': self.ch.getDataInterval,
        'min data interval': self.ch.getMinDataInterval
    }
    for k, v in self.status_dict.items():
        comment('{}: {}'.format(k, v()))
    self.ch.setOnVelocityChangeHandler(self.velocity_change_handler)
    self.ch.setOnPositionChangeHandler(self.position_change_handler)
    self.image_title = 0
    # Keras model that scores the focus quality of incoming frames
    self.focus_model = load_model(
        os.path.join(experiment_folder_location, 'VGG_model_5.hdf5'))
    self.focus_model._make_predict_function()
    # offset applied elsewhere to compensate for belt slip —
    # presumably in stepper units; TODO confirm
    self.belt_slip_offset = 120
def change_type_to_lyse(self, index):
    """Switch the cell type to lyse and (re)load the matching Keras
    localizer model.

    ``index`` maps to a (cell type name, model filename) pair; the
    'green hope' model is the only one loaded without the custom
    mean_iou object.
    """
    map_dict = {
        # 0:('red','multiclass_localizer18_2.hdf5'),
        # 1:('green','multiclass_localizer18_2.hdf5'),
        0: ('red', 'model2018-10-18_08_47'),
        1: ('green', 'model2018-10-18_08_47'),
        2: ('green hope',
            'second_binary_green_hope_localizer_16_0.28892_1_54_7_12.hdf5')
    }
    self.cell_type_to_lyse = map_dict[index][0]
    comment('loading cell localizer model...{}'.format(map_dict[index][1]))
    # drop the old model and clear the TF session before loading anew
    self.localizer_model = None
    K.clear_session()
    # TODO: remove this if statement once we have a model that doesnt need the custom object
    # Bug fix: compare the cell-type NAME (element [0]); the original
    # compared the whole (name, file) tuple to the string 'green hope',
    # which was always unequal, making the else branch unreachable.
    if map_dict[index][0] != 'green hope':
        self.localizer_model = load_model(
            os.path.join(experiment_folder_location, map_dict[index][1]),
            custom_objects={'mean_iou': mean_iou})
    else:
        self.localizer_model = load_model(
            os.path.join(experiment_folder_location, map_dict[index][1]))
    self.localizer_model._make_predict_function()
    comment('changed cell type to:' + str(self.cell_type_to_lyse))
def localize(self): ''' function to scan an entire well, and lyse the number of cells desired by the user, using the method of lysis that the user selects, then returns to the original position (the center of the well) ''' # first get our well center position self.lysed_cell_count = 0 self.auto_lysis = True self.well_center = self.get_stage_position() # now start moving and lysing all in view self.lyse_all_in_view() box_size = 5 # stitcher = wellStitcher(box_size,self.image) directions = self.get_spiral_directions(box_size) self.get_well_center = False for num, let in directions: for i in range(num): if self.auto_lysis == False: self.stop_laser_flash_signal.emit() return if self.lysed_cell_count >= self.cells_to_lyse: self.delay() self.return_to_original_position(self.well_center) return self.delay() self.move_frame(let) self.delay() QApplication.processEvents() self.delay() QApplication.processEvents() # stitcher.add_img(let,self.image) self.lyse_all_in_view() comment('lysis completed!') # stitcher.write_well_img() self.return_to_original_position(self.well_center)
def buildPrefrenceMatrix(criteriaArray):
    """Interactively fill the upper triangle of an AHP pairwise
    preference matrix; the lower triangle holds the reciprocals and
    the diagonal stays at 1."""
    numberOfCriteria = len(criteriaArray)
    comment(f"Building prefernce matrix for {numberOfCriteria} criterias...")
    matrix = np.ones((numberOfCriteria, numberOfCriteria))
    comment("Enter priority rating:", empty=True)
    comment(
        " [1: equal importance, 3: Moderate importance, 5: Strong importance, 7: very strong importance, 9: Extreme importance, 1/3=0.333 1/5=0.2 1/7=0.143 1/9=0.111: inverse comparaison]",
        empty=True)
    for i in range(numberOfCriteria):
        for j in range(i + 1, numberOfCriteria):
            rating = compareCriteria(criteriaArray[i], criteriaArray[j])
            matrix[i][j] = rating
            matrix[j][i] = 1 / rating
    return matrix
def issue_command(self, command):
    """Send one CRLF-terminated command to the laser, logging the
    command text without its terminator."""
    payload = '{}\r\n'.format(command)
    comment('sending command to laser:{}'.format(payload.split('\r')[0]))
    self.ser.write(payload.encode('utf-8'))
def setImage(self, image):
    """Slot: receive a new QImage from the video thread and repaint.

    Null frames are logged as drops.
    """
    if image.isNull():
        comment("Viewer Dropped frame!")
    # NOTE(review): a null frame is still stored and painted below,
    # matching the original behaviour — confirm that is intended
    self.image = image
    self.update()
def ai_fire_qswitch_slot(self, auto_fire):
    """Slot driven by the localizer: fire the laser q-switch.

    ``auto_fire`` True selects auto-repeat mode; False fires a single
    shot.
    """
    comment('automated firing from localizer!')
    if auto_fire:  # was `auto_fire == True`; equivalent for the bool payload
        laser.qswitch_auto()
    else:
        laser.fire_qswitch()
def send_user_comment(self):
    """Log whatever the user typed into the comment box, then clear it."""
    text = self.ui.comment_box.toPlainText()
    comment('user comment:{}'.format(text))
    self.ui.comment_box.clear()
def qswitch_screenshot_slot(self):
    """Request 15 screenshot frames around a q-switch firing, log the
    stage position at fire time, then fire the q-switch."""
    self.qswitch_screenshot_signal.emit(15)
    position = stage.get_position_slot()
    comment('stage position during qswitch: {}'.format(position))
    laser.fire_qswitch()
def __init__(self, test_run):
    """Build the main GUI: construct the worker objects, move them onto
    their own Qt threads, wire every signal/slot connection, and start
    the video stream. ``test_run == 'True'`` skips the interactive
    experiment-variable prompts."""
    super(main_window, self).__init__()
    self.lysing = True
    # get our experiment variables (skipped on a test run)
    if test_run != 'True':
        self.get_experiment_variables()
    # Set up the user interface
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
    # set up the video classes
    self.vid = ShowVideo(self.ui.verticalLayoutWidget.size())
    self.screen_shooter = screen_shooter()
    self.image_viewer = ImageViewer()
    # self.autofocuser = autofocuser()
    self.localizer = Localizer()
    # add the viewer to our ui
    self.ui.verticalLayout.addWidget(self.image_viewer)
    # create our extra threads (workers are moved off the GUI thread)
    self.screenshooter_thread = QThread()
    self.screenshooter_thread.start()
    self.screen_shooter.moveToThread(self.screenshooter_thread)
    # self.autofocuser_thread = QThread()
    # self.autofocuser_thread.start()
    # self.autofocuser.moveToThread(self.autofocuser_thread)
    self.localizer_thread = QThread()
    self.localizer_thread.start()
    self.localizer.moveToThread(self.localizer_thread)
    self.video_input_thread = QThread()
    self.video_input_thread.start()
    self.vid.moveToThread(self.video_input_thread)
    # connect the outputs to our signals
    self.vid.VideoSignal.connect(self.image_viewer.setImage)
    self.vid.vid_process_signal.connect(
        self.screen_shooter.screenshot_slot)
    # self.vid.vid_process_signal.connect(self.autofocuser.vid_process_slot)
    self.vid.vid_process_signal.connect(self.localizer.vid_process_slot)
    self.qswitch_screenshot_signal.connect(
        self.screen_shooter.save_qswitch_fire_slot)
    self.localizer.qswitch_screenshot_signal.connect(
        self.screen_shooter.save_qswitch_fire_slot)
    # self.start_focus_signal.connect(self.autofocuser.autofocus)
    self.start_localization_signal.connect(self.localizer.localize)
    # self.autofocuser.position_and_variance_signal.connect(self.plot_variance_and_position)
    self.image_viewer.click_move_signal.connect(stage.click_move_slot)
    self.localizer.localizer_move_signal.connect(stage.localizer_move_slot)
    self.localizer.ai_fire_qswitch_signal.connect(
        self.ai_fire_qswitch_slot)
    self.localizer.start_laser_flash_signal.connect(
        self.start_laser_flash_slot)
    self.localizer.stop_laser_flash_signal.connect(
        self.stop_laser_flash_slot)
    self.vid.reticle_and_center_signal.connect(
        stage.reticle_and_center_slot)
    # push the current reticle/center geometry to the stage controller
    self.vid.reticle_and_center_signal.emit(self.vid.center_x,
                                            self.vid.center_y,
                                            self.vid.reticle_x,
                                            self.vid.reticle_y)
    # connect to the video thread and start the video
    self.start_video_signal.connect(self.vid.startVideo)
    self.start_video_signal.emit()
    # Screenshot and comment buttons
    self.ui.misc_screenshot_button.clicked.connect(
        self.screen_shooter.save_misc_image)
    self.ui.user_comment_button.clicked.connect(self.send_user_comment)
    self.ui.noise_filter_checkbox.stateChanged.connect(
        self.noise_filter_check_changed)
    # Stage movement buttons
    self.ui.step_size_doublespin_box.valueChanged.connect(
        stage.set_step_size)
    self.setup_combobox()
    self.localizer.get_position_signal.connect(stage.get_position_slot)
    stage.position_return_signal.connect(
        self.localizer.position_return_slot)
    # Laser control buttons
    self.ui.qswitch_delay_doublespin_box.valueChanged.connect(
        laser.set_delay)
    self.ui.attenuator_doublespin_box.valueChanged.connect(
        attenuator.set_attenuation)
    self.ui.cells_to_lyse_doublespin_box.valueChanged.connect(
        self.localizer.set_cells_to_lyse)
    self.ui.process_well_pushButton.clicked.connect(
        self.start_localization)
    self.show()
    comment('finished gui init')
def processPreferenceMatrix(preferenceMatrix, criteriaArray):
    """Derive criteria weights from the preference matrix, then return
    the (consistency index, consistency ratio) pair from
    computeConsistency."""
    comment(f"Processing size {len(criteriaArray)} prefernce matrix...")
    weights = calculateCriteriaWeights(preferenceMatrix, criteriaArray)
    # renamed from `consistencyIndex`: this is the full (CI, CR) tuple
    consistency = computeConsistency(preferenceMatrix, weights)
    return consistency
def get_position(self):
    """Poll the stepper for its current position, log it, cache it on
    self.position, and return it."""
    self.position = self.ch.getPosition()
    comment('stepper position: {}'.format(self.position))
    return self.position
# 75:self.autofocuser.stop_roll } if event.key() in key_control_dict.keys(): key_control_dict[event.key()]() def closeEvent(self, event): self.vid.run_video = False @QtCore.pyqtSlot('PyQt_PyObject') def plot_variance_and_position(self, ituple): positions = ituple[0] variances = ituple[1] plt.plot(positions) plt.plot(variances) plt.legend(['Variance of Laplacian', 'AF Network Output']) plt.xlabel('Position') plt.ylabel('Focus Metric Comparison') plt.show() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('test_run') args = parser.parse_args() app = QApplication(sys.argv) stage = stage_controller() attenuator = attenuator_controller() laser = laser_controller() window = main_window(args.test_run) comment('exit with code: ' + str(app.exec_()))
def set_step_size(self, step_size):
    """Slot: record a new stage step size, logging the change."""
    comment('step size changed to: {}'.format(step_size))
    self.step_size = step_size
def set_cells_to_lyse(self, number_of_cells):
    """Slot: store how many cells the localizer should lyse."""
    self.cells_to_lyse = number_of_cells
    comment('set number of cells to lyse to:' + str(number_of_cells))
def change_magnification(self, index):
    """Slot: map a combobox index to an objective magnification and
    trigger stage offset compensation for the change."""
    index_to_mag = {0: 4, 1: 20, 2: 40, 3: 60, 4: 100}
    new_mag = index_to_mag[index]
    comment('magnification changed to: {}'.format(new_mag))
    self.compensate_for_objective_offsets(self.magnification, new_mag)
def issue_command(self, command):
    """Send one ';AT:'-prefixed, newline-terminated command to the
    attenuator, logging the command without its terminator."""
    payload = ';AT:{}\n'.format(command)
    comment('sending command to attenuator:{}'.format(payload.split('\n')[0]))
    self.ser.write(payload.encode('utf-8'))
def compareCriteria(c1, c2):
    """Ask the user to rate the importance of c1 relative to c2 and
    return the rating as a float."""
    prompt = f"How important is \"{c1.name}\" compared to \"{c2.name}\" ?"
    comment(prompt, empty=True)
    answer = get()
    return float(answer)
def change_lysis_mode(self, index):
    """Slot: switch between 'direct' and 'excision' lysis modes."""
    map_dict = {0: 'direct', 1: 'excision'}
    self.lysis_mode = map_dict[index]
    # Bug fix: the log text said 'changed cell type to:' — copied from
    # change_type_to_lyse — but this method sets the lysis mode.
    comment('changed lysis mode to:' + str(self.lysis_mode))
def main():
    """Run the interactive Saaty AHP session end to end: gather
    criteria, build the pairwise matrix, compute weights, and report
    them if the consistency ratio is acceptable."""
    print(f"\n\n ======== {timeStamp()} :: Saaty ========\n\n")
    comment("Enter number of criteria")
    total = int(get())
    criteriaArray = []
    for done in range(total):
        comment(f"Enter critera, {total-done} left: ")
        criteriaArray.append(Criteria(get("Criteria name: ")))
    preferenceMatrix = buildPrefrenceMatrix(criteriaArray)
    _, consistencyRatio = processPreferenceMatrix(preferenceMatrix,
                                                  criteriaArray)
    # CR < 0.1 is the conventional AHP acceptance threshold
    if consistencyRatio < 0.1:
        comment("Computed criteria weights are valid for use.", empty=True)
        sortCriteria(criteriaArray)
        comment("Criteria weights for the specified matrix are: ")
        for crt in criteriaArray:
            comment(f"rank: {crt.rank} - {crt.name}: {round(crt.weight,3)}",
                    empty=True)
        comment(f"with consistency ratio of: {round(consistencyRatio,4)}",
                empty=True)
    else:
        comment(
            "Computed criteria weights are not valid for use! please check priority rating.",
            empty=True)
        comment(f"with consistency ratio of: {round(consistencyRatio,4)}",
                empty=True)