def __release(self):
    """Stop the preview timer and release every configured video camera."""
    self.__camera_timer.stop()
    detector_cfg = DetectorConfig.instance()
    manager = detector_cfg.get_manager()
    cameras = detector_cfg.get_video_cameras()
    # one camera per config, matched by index
    for index in range(len(manager.get_configs())):
        cameras[index].release()
def showEvent(self, event):
    """Open all cameras, load samples, and reset widgets when shown."""
    detector_cfg = DetectorConfig.instance()
    manager = detector_cfg.get_manager()
    main_idx, self.__main_cfg = manager.get_main_config()
    self.__capturing = True
    cameras = detector_cfg.get_video_cameras()
    self.__main_cam = cameras[main_idx]
    for cam_idx, cfg in enumerate(manager.get_configs()):
        cameras[cam_idx].open(cfg["camera_uri"])
        # test only
        # cameras[cam_idx].open(Videos.instance().next())
    self.__sample_left = manager.get_sample_left()
    self.__sample_right = manager.get_sample_right()
    (self.__pre_sample_left,
     self.__pre_sample_right) = manager.preprocess_images(
         self.__main_cfg, self.__sample_left, self.__sample_right)
    # clear all preview widgets before the timer starts feeding frames
    for widget in (self.image1, self.left_detected_image,
                   self.right_detected_image, self.side_result_image):
        widget.imshow(None)
    self.ui.inpResult.setHtml("<b>RESULT</b>")
    self.__set_btn_capture_text()
    self.__view_image_sample()
    self.__load_config()
    self.__camera_timer.start(20)
    return
def binding(self):
    """Expose button signals and wire the logout / manager-change handlers."""
    ui = self.ui
    self.action_edit = ui.btnEditConfig.clicked
    self.action_start = ui.btnStart.clicked
    self.action_exit = ui.btnExit.clicked
    ui.btnLogout.clicked.connect(self.btn_log_out)
    DetectorConfig.instance().manager_changed.connect(self.manager_changed)
    return
def btn_save_sample_clicked(self):
    """Persist the currently detected left/right pair as the sample images."""
    if self.__detected_pair is None:
        return
    left, right = self.__detected_pair
    folder_path = DetectorConfig.instance().get_current_path()
    # samples are stored next to the saved configuration, so it must exist
    if folder_path is None:
        helpers.show_message("You must save configuration first")
        return
    cv2.imwrite(os.path.join(folder_path, detector.SAMPLE_LEFT_FILE), left)
    cv2.imwrite(os.path.join(folder_path, detector.SAMPLE_RIGHT_FILE), right)
    DetectorConfig.instance().get_manager().load_sample_images()
    helpers.show_message("Save successfully")
    self.__set_btn_next_enabled()
def chk_main_camera_state_changed(self):
    """Toggle the main-camera role of the current config from the checkbox."""
    is_checked = self.ui.chkMainCamera.isChecked()
    is_main = self.__current_cfg["is_main"]
    if is_main == is_checked:
        # no role change requested
        return
    if self.__actual_length_edited:
        # unsaved length edits block a role switch; undo the toggle
        helpers.show_message("You must save actual length first")
        revert_state = (Qt.CheckState.Unchecked
                        if is_checked else Qt.CheckState.Checked)
        self.ui.chkMainCamera.setCheckState(revert_state)
        return
    new_main = self.__current_cfg["name"] if is_checked else None
    DetectorConfig.instance().get_manager().set_main_config(new_main)
    self.__change_ui_based_on_is_main()
def __process_contours(self, image):
    """Draw every extracted bounding box onto *image*.

    Returns the annotated image and the intermediate processed frame.
    """
    manager = DetectorConfig.instance().get_manager()
    boxes, proc = manager.extract_boxes(self.__current_cfg, image)
    for entry in boxes:
        # entry: (contour, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx)
        helper.draw_boxes(image, entry[4])
    return image, proc
def showEvent(self, event):
    """Refresh the screen with the currently selected camera configuration."""
    _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
    self.__set_btn_capture_text()
    if self.__current_cfg is None:
        # nothing selected yet: hide the config widgets
        self.__show_config_section(False)
        return
    self.__load_config()
def __preprocess_color(self, sample_left, sample_right):
    """Run the manager's preprocessing on both sample sides and return them."""
    manager = DetectorConfig.instance().get_manager()
    cfg = self.__current_cfg
    # third argument flags which side is being processed (True = left)
    return (manager.preprocess(cfg, sample_left, True),
            manager.preprocess(cfg, sample_right, False))
async def __detect_asym_diff(self, left, right):
    """Run asymmetry detection against the stored samples and show the result.

    Updates the per-side re-calc factors (keeping the highest value seen so
    far), mirrors them into the spin boxes, and renders a PASSED/FAILED
    HTML summary into the result box.
    """
    sim_cfg = self.__current_cfg["sim_cfg"]
    manager = DetectorConfig.instance().get_manager()
    left_result, right_result = await manager.detect_asym(
        self.__current_cfg, left, right, self.__sample_left,
        self.__sample_right, None)
    (is_asym_diff_left, avg_asym_left, avg_amp_left, recalc_left,
     res_list_l, amp_res_list_l) = left_result
    (is_asym_diff_right, avg_asym_right, avg_amp_right, recalc_right,
     res_list_r, amp_res_list_r) = right_result
    # calculate calc_factor for both sides and keep only the highest;
    # fix: guard against a zero amplitude, which previously raised
    # ZeroDivisionError inside the async worker
    if avg_amp_left:
        tmp_factor_left = avg_asym_left / avg_amp_left
        if tmp_factor_left > sim_cfg['re_calc_factor_left']:
            sim_cfg['re_calc_factor_left'] = tmp_factor_left
    if avg_amp_right:
        tmp_factor_right = avg_asym_right / avg_amp_right
        if tmp_factor_right > sim_cfg['re_calc_factor_right']:
            sim_cfg['re_calc_factor_right'] = tmp_factor_right
    # update result to screen
    self.ui.inpReCalcFactorLeft.setValue(sim_cfg["re_calc_factor_left"])
    self.ui.inpReCalcFactorRight.setValue(sim_cfg["re_calc_factor_right"])
    # result display
    cur_date_str = datetime.datetime.now().strftime(ISO_DATE_FORMAT)
    left_result_text = "PASSED" if not is_asym_diff_left else "FAILED"
    right_result_text = "PASSED" if not is_asym_diff_right else "FAILED"
    result_text = f"<b>RESULT</b><br/>" + f"<b>Time</b>: {cur_date_str}<br/>"
    result_text += f"<b>Original similarity</b>: Left {avg_asym_left:.2f} - Right {avg_asym_right:.2f}<br/>"
    result_text += f"<b>Final similarity</b>: Left {recalc_left:.2f} - Right {recalc_right:.2f}<br/>"
    result_text += f"<b>Final result</b>: Left {left_result_text} - Right {right_result_text}"
    self.ui.inpResult.setHtml(result_text)
def __process_pair(self, image):
    """Annotate detected groups on *image* and build the combined pair preview.

    Returns (annotated image, concatenated pair image or None,
    pair images list or None).
    """
    manager = DetectorConfig.instance().get_manager()
    boxes, proc = manager.extract_boxes(self.__current_cfg, image)
    (final_grouped, sizes, check_group_idx, pair, split_left, split_right,
     image_detect) = manager.detect_groups_and_checked_pair(
         self.__current_cfg, boxes, image)
    unit = self.__current_cfg["length_unit"]
    for group_idx, group in enumerate(final_grouped):
        for box_idx, entry in enumerate(group):
            # entry: (contour, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx)
            box, tl, br = entry[4], entry[5], entry[7]
            lH, lW = sizes[group_idx][box_idx]
            helper.draw_boxes_and_sizes(image, group_idx, box, lH, lW, unit,
                                        tl, br)
    if pair is None:
        return image, None, None
    manager.check_group(check_group_idx, final_grouped)
    left, right = pair[0][0], pair[1][0]
    left = cv2.flip(left, 1)  # mirror the left side for display
    label_w, label_h = self.image1.width(), self.image1.height()
    images = [left, right]
    self.__detected_pair = images
    final_img = helpers.concat_images(images, label_w, label_h)
    return image, final_img, images
def showEvent(self, event):
    """Restore the sample preview (if samples exist) when this screen appears."""
    _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
    self.__detected_pair = None
    self.image3.imshow(None)
    manager = DetectorConfig.instance().get_manager()
    left = manager.get_sample_left()
    right = manager.get_sample_right()
    if left is not None:
        # NOTE(review): assumes right is present whenever left is — confirm
        images = [left, right]
        self.__detected_pair = images
        final_img = helpers.concat_images(images, self.image1.width(),
                                          self.image1.height())
        self.image3.imshow(final_img)
    self.__set_btn_next_enabled()
    self.__set_btn_capture_text()
    self.__load_config()
def table_action_remove_triggered(self, point):
    """Remove the camera config of the selected table row and rebuild the table.

    *point* is the context-menu position supplied by Qt (unused).
    """
    table = self.ui.tblCameraConfig
    chosen_row = table.currentRow()
    # fix: with no selection currentRow() is -1 and table.item(-1, 0)
    # returns None, which would crash on .text() below
    if chosen_row < 0:
        return
    detector_cfg = DetectorConfig.instance()
    camera_name = table.item(chosen_row, 0).text()
    _, cfg = detector_cfg.get_manager().get_config_by_name(camera_name)
    detector_cfg.remove_config(cfg)
    self.__reload_table()
def __view_image_sample(self):
    """Scale both sample images to the preview label size and display them."""
    # fix: dropped an unused `manager` local fetched from DetectorConfig
    # resize target: the left preview label's current dimensions
    dim = (self.left_detected_image.width(),
           self.left_detected_image.height())
    m_left = cv2.resize(self.__sample_left, dim)
    m_right = cv2.resize(self.__sample_right, dim)
    self.left_sample_image.imshow(m_left)
    self.right_sample_image.imshow(m_right)
def __process_image(self, image):
    """Detect the pair on *image*; queue async error detection when a model is loaded.

    Returns the contour-annotated image for display.
    """
    manager = DetectorConfig.instance().get_manager()
    contour, detected_pair = self.__process_pair(image)
    if detected_pair is not None and manager.get_model() is not None:
        # defect detection runs off the UI thread
        worker = WorkerRunnable(self.__detect_error, detected_pair,
                                parent=self)
        worker.work_error.connect(lambda ex: print(ex))
        QThreadPool.globalInstance().start(worker)
    return contour
def btn_detect_clicked(self):
    """Run defect detection on the last captured pair in a background worker."""
    manager = DetectorConfig.instance().get_manager()
    if self.__last_pair is None or manager.get_model() is None:
        return
    if len(self.__last_pair) == 0:
        helpers.show_message("No pair detected")
        return
    # fix: dropped an enumerate() whose index was never used;
    # copy so the worker owns its own buffers
    images = [img.copy() for img in self.__last_pair]
    runnable = WorkerRunnable(self.__detect_error, images, parent=self)
    runnable.work_error.connect(lambda ex: print(ex))
    QThreadPool.globalInstance().start(runnable)
def __init__(self, identity_service: IdentityService):
    """Build the main window, wire its UI, and grab shared detector resources."""
    QMainWindow.__init__(self)
    self.__identity_service = identity_service
    self.__view_cam = None  # no camera displayed yet
    self.__video_camera = cv2.VideoCapture()
    detector_cfg = DetectorConfig.instance()
    self.__detector_cfg = detector_cfg
    self.__camera_timer = detector_cfg.get_timer()
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
    self.build()
    self.binding()
def __view_image_sample(self):
    """Preprocess both samples and show them as fixed-width thumbnails."""
    manager = DetectorConfig.instance().get_manager()
    processed_left, processed_right = self.__preprocess_color(
        manager.get_sample_left(), manager.get_sample_right())
    # fixed 156px width; height tracks the screen panel minus a 30px margin
    thumb_size = (156, self.ui.screen2.height() - 30)
    shrunk_left = cv2.resize(processed_left, thumb_size,
                             interpolation=cv2.INTER_AREA)
    shrunk_right = cv2.resize(processed_right, thumb_size,
                              interpolation=cv2.INTER_AREA)
    self.image_sample_left.imshow(shrunk_left)
    self.image_sample_right.imshow(shrunk_right)
def binding(self):
    """Connect every widget signal on this screen to its handler."""
    ui = self.ui
    self.backscreen = ui.btnBack.clicked
    self.nextscreen = ui.btnNext.clicked
    # (signal, slot) wiring table keeps the hookup readable in one place
    connections = (
        (ui.btnCapture.clicked, self.btn_capture_clicked),
        (DetectorConfig.instance().manager_changed, self.manager_changed),
        (ui.sldBrightness.valueChanged, self.sld_brightness_value_change),
        (ui.sldContrast.valueChanged, self.sld_contrast_value_change),
        (ui.sldThreshold1.valueChanged, self.sld_threshold1_value_change),
        (ui.sldThreshold2.valueChanged, self.sld_threshold2_value_change),
        (ui.sldBlur.valueChanged, self.sld_blur_value_change),
        (ui.sldDilate.valueChanged, self.dilate_value_change),
        (ui.sldErode.valueChanged, self.sld_erode_value_change),
        (ui.sldBkgThresh.valueChanged, self.sld_bkg_value_change),
        (ui.sldLightAdj.valueChanged, self.sld_light_adj_value_change),
        (ui.sldLightAdjRange.valueChanged,
         self.sld_light_adj_range_value_change),
        (ui.cbbCamera.currentIndexChanged, self.cbbCamera_changed),
        (ui.btnColorFrom.clicked, self.btn_color_from_clicked),
        (ui.btnColorTo.clicked, self.btn_color_to_clicked),
        (ui.cbbHeight.currentIndexChanged, self.cbbHeight_changed),
        (ui.cbbWidth.currentIndexChanged, self.cbbWidth_changed),
        (ui.cbbMethod.currentIndexChanged, self.cbbMethod_changed),
        (ui.ckbInvertThresh.stateChanged,
         self.chk_thresh_invert_state_change),
        (ui.ckbInvertRange.stateChanged, self.chk_range_invert_state_change),
        (ui.tblCameraConfig.itemSelectionChanged,
         self.tbl_camera_item_selection_changed),
        (ui.tblCameraConfig.customContextMenuRequested,
         self.table_context_menu),
        (self.tblPopActionEditName.triggered,
         self.table_action_edit_name_triggered),
        (self.tblPopActionRemove.triggered,
         self.table_action_remove_triggered),
        (ui.btnAdd.clicked, self.btn_add_clicked),
    )
    for signal, slot in connections:
        signal.connect(slot)
def camera_timer_timeout(self):
    """Grab a frame from the main camera, resize it, and run detection on it."""
    # fix: dropped an unused `manager` local fetched from DetectorConfig
    _, image = self.__main_cam.read()
    if image is None:
        # test only
        # self.__main_cam.open(Videos.instance().next())
        # _, image = self.__main_cam.read()
        self.image1.imshow(image)  # clears the preview while no frame arrives
        return
    frame_width = self.__main_cfg["frame_width"]
    frame_height = self.__main_cfg["frame_height"]
    image = cv2.resize(image, (frame_width, frame_height))
    trio.run(self.process_image, image)
def validate_show(self):
    """Check preconditions for entering the detection screen.

    Returns an error message string when something is missing, else None.
    """
    manager = DetectorConfig.instance().get_manager()
    _, main_cfg = manager.get_main_config()
    if main_cfg is None:
        return "No main configuration available"
    has_model = manager.get_model() is not None
    # any camera with defect detection enabled requires a loaded model
    for cfg in manager.get_configs():
        if cfg["is_defect_enable"] and not has_model:
            # fix: user-facing typo ("founded" -> "found")
            return "Defect detection enabled but no model found"
    if (manager.get_sample_left() is None
            or manager.get_sample_right() is None):
        return "Samples not found"
    return None
def btn_add_clicked(self):
    """Create a new side-camera config from the name input and add a table row."""
    # fix: dropped an unused `manager` local fetched from detector_cfg
    detector_cfg = DetectorConfig.instance()
    table = self.ui.tblCameraConfig
    camera_name = self.ui.txtNewCamera.text().strip()
    err_text = detector_cfg.validate_config_name(camera_name)
    if err_text is not None:
        helpers.show_message(err_text)
        return
    new_cfg = detector.default_detector_config()
    new_cfg["name"] = camera_name
    detector_cfg.add_config(new_cfg)
    # new configs always start as side cameras; the main role is set elsewhere
    self.__add_new_row(table, camera_name, "Side camera")
async def process_image(self, image):
    """Display the frame according to the selected view mode and, when a
    pair is detected, show both sides and queue asynchronous pair processing.

    NOTE(review): the loop variable ``idx`` below shadows the main-config
    index returned by get_main_config — confirm that index is not needed
    after the loop.
    """
    label_w = self.image1.width()
    label_h = self.image1.height()
    dim = (label_w, label_h)
    manager = DetectorConfig.instance().get_manager()
    idx, main_cfg = manager.get_main_config()
    # "Original" mode: show the raw frame only
    if self.__last_display_type == "Original":
        resized_image = cv2.resize(image, dim)
        self.image1.imshow(resized_image)
    boxes, proc = manager.extract_boxes(main_cfg, image)
    final_grouped, sizes, check_group_idx, pair, split_left, split_right, image_detect = manager.detect_groups_and_checked_pair(
        main_cfg, boxes, image)
    if self.__last_display_type == "Detection":
        # draw every detected box with its measured height/width
        unit = main_cfg["length_unit"]
        for idx, group in enumerate(final_grouped):
            for b_idx, b in enumerate(group):
                c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
                cur_size = sizes[idx][b_idx]
                lH, lW = cur_size
                helper.draw_boxes_and_sizes(image, idx, box, lH, lW, unit,
                                            tl, br)
        resized_image = cv2.resize(image, dim)
        self.image1.imshow(resized_image)
    elif self.__last_display_type == "Contours":
        # show the intermediate processed/contour frame instead
        resized_image = cv2.resize(proc, dim)
        self.image1.imshow(resized_image)
    if (pair is not None):
        manager.check_group(check_group_idx, final_grouped)
        left, right = pair
        left, right = left[0], right[0]
        left = cv2.flip(left, 1)  # mirror the left side for display
        label_w = self.left_detected_image.width()
        label_h = self.left_detected_image.height()
        dim = (label_w, label_h)
        left_resized, right_resized = cv2.resize(left, dim), cv2.resize(
            right, dim)
        self.left_detected_image.imshow(left_resized)
        self.right_detected_image.imshow(right_resized)
        self.__last_detect_time = datetime.datetime.now()
        # heavy pair processing runs off the UI thread
        runnable = WorkerRunnable(self.__process_pair,
                                  self.__last_detect_time, check_group_idx,
                                  final_grouped, sizes, pair, parent=self)
        runnable.work_error.connect(lambda ex: print(ex))
        QThreadPool.globalInstance().start(runnable)
def __reload_roles(self):
    """Refresh the role column ("Main camera"/"Side camera") for every row."""
    # fix: replaced a hand-rolled row counter with enumerate and dropped
    # an unused `camera_name` local
    table = self.ui.tblCameraConfig
    manager = DetectorConfig.instance().get_manager()
    for row, cfg in enumerate(manager.get_configs()):
        role = "Main camera" if cfg["is_main"] else "Side camera"
        table.item(row, 1).setData(Qt.ItemDataRole.DisplayRole, role)
def tbl_camera_item_selection_changed(self):
    """Sync the current config with the table selection and refresh the UI."""
    table = self.ui.tblCameraConfig
    chosen_row = table.currentRow()
    if chosen_row == self.__last_selected_row:
        return  # selection did not actually move
    self.__last_selected_row = chosen_row
    detector_cfg = DetectorConfig.instance()
    if chosen_row == -1:
        # selection cleared
        detector_cfg.set_current_cfg_name(None)
        self.__show_config_section(False)
        self.camera_changed.emit(-1)
        return
    camera_name = table.item(chosen_row, 0).text()
    detector_cfg.set_current_cfg_name(camera_name)
    _, self.__current_cfg = detector_cfg.get_current_cfg()
    self.__show_config_section(True)
    self.__load_config()
def __process_image(self, image):
    """Draw each detected box with its (possibly unit-converted) dimensions.

    Returns the annotated image and the raw pixel sizes of every box.
    """
    manager = DetectorConfig.instance().get_manager()
    boxes, proc = manager.extract_boxes(self.__current_cfg, image)
    length_per_10px = self.__current_cfg["length_per_10px"]
    unit = self.__current_cfg["length_unit"]
    sizes = []
    for entry in boxes:
        # entry: (contour, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx)
        dim_a, dim_b = entry[2], entry[3]
        box, tl, br = entry[4], entry[5], entry[7]
        sizes.append((dim_a, dim_b))  # record pixel sizes before conversion
        if length_per_10px:  # calibration exists and is non-zero
            dim_a = helper.calculate_length(dim_a, length_per_10px)
            dim_b = helper.calculate_length(dim_b, length_per_10px)
        helper.draw_boxes_and_sizes(image, None, box, dim_a, dim_b, unit,
                                    tl, br)
    return image, sizes
def table_action_edit_name_triggered(self):
    """Prompt for a new name for the selected camera config and apply it."""
    table = self.ui.tblCameraConfig
    chosen_row = table.currentRow()
    detector_cfg = DetectorConfig.instance()
    camera_name = table.item(chosen_row, 0).text()
    dialog = DialogEditName(camera_name, parent=self)
    if dialog.exec_() != QDialog.Accepted:
        return
    new_name = dialog.get_inp_edit_name().strip()
    err_text = detector_cfg.validate_config_name(new_name)
    if err_text is not None:
        helpers.show_message(err_text)
        return
    # NOTE(review): this renames the *current* cfg, not necessarily the one
    # at chosen_row — confirm the context menu always selects the row first
    _, cfg = detector_cfg.get_current_cfg()
    cfg["name"] = new_name
    self.__reload_table()
def __process_pair(self, image):
    """Detect the side-camera pair on *image* and outline every box.

    Returns the annotated image plus the list of pair images (or None
    when no pair was detected).
    """
    # fix: dropped an unused `pair_len` local
    manager = DetectorConfig.instance().get_manager()
    boxes, proc = manager.extract_boxes(self.__current_cfg, image)
    image_detect = image.copy()
    pair, image_detect, boxes = manager.detect_pair_side_cam(
        self.__current_cfg, boxes, image_detect)
    # output: outline every remaining box on the original frame
    for entry in boxes:
        # entry: (contour, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx)
        helper.draw_boxes(image, entry[4])
    if pair is not None:
        images = [item[0] for item in pair]
        return image, images
    return image, None
async def __find_amp_threshold(self, img_left, img_right):
    """Compare captured images to the samples and track the amplify threshold.

    While the threshold is being edited, per-channel maxima seen so far are
    accumulated and written back into the config; the comparison outcome is
    rendered as HTML into the result box.
    """
    manager = DetectorConfig.instance().get_manager()
    sample_left = manager.get_sample_left()
    sample_right = manager.get_sample_right()
    sample_left, sample_right = self.__preprocess_color(
        sample_left, sample_right)
    left_task, right_task = await manager.compare_colors(
        self.__current_cfg, img_left, img_right, sample_left, sample_right,
        not self.__amp_thresh_edited, None)
    _, avg_diff_l, left_hist, is_diff_l = left_task
    _, avg_diff_r, right_hist, is_diff_r = right_task
    # histograms are per-channel in (blue, green, red) order
    blue = max(left_hist[0], right_hist[0])
    green = max(left_hist[1], right_hist[1])
    red = max(left_hist[2], right_hist[2])
    max_blue = max(blue, self.__max_blue)
    max_green = max(green, self.__max_green)
    max_red = max(red, self.__max_red)
    amp_thresh = (float(max_blue), float(max_green), float(max_red))
    if self.__amp_thresh_edited:
        # persist the running maxima only while the threshold is being tuned
        self.__max_blue = max_blue
        self.__max_red = max_red
        self.__max_green = max_green
        self.ui.ampThreshBlue.setValue(amp_thresh[0])
        self.ui.ampThreshGreen.setValue(amp_thresh[1])
        self.ui.ampThreshRed.setValue(amp_thresh[2])
        self.__current_cfg["color_cfg"]["amplify_thresh"] = amp_thresh
    # result display
    cur_date_str = datetime.datetime.now().strftime(ISO_DATE_FORMAT)
    result_text = f"<b>RESULT</b><br/>" + f"<b>Time</b>: {cur_date_str}<br/>"
    avg_diff_l *= 100
    avg_diff_r *= 100
    # fix: these were previously unpacked as r, g, b even though amp_thresh
    # is ordered (blue, green, red); the names now match the values while
    # the displayed order stays identical
    b, g, r = amp_thresh
    result_text += f"<b>Current different value</b>: {b:.2f}, {g:.2f}, {r:.2f}<br/>"
    result_text += f"<b>Left different</b>: {avg_diff_l:.2f}%<br/>"
    result_text += f"<b>Right different</b>: {avg_diff_r:.2f}%<br/>"
    if not self.__amp_thresh_edited:
        left_result_text = "PASSED" if not is_diff_l else "FAILED"
        right_result_text = "PASSED" if not is_diff_r else "FAILED"
        result_text += f"<b>Result</b>: Left {left_result_text} - Right {right_result_text}"
    self.ui.inpResult.setHtml(result_text)
def __reload_table(self):
    """Rebuild the camera table from the manager's configs and reset selection."""
    table = self.ui.tblCameraConfig
    table.clearContents()
    table.setRowCount(0)
    manager = DetectorConfig.instance().get_manager()
    for cfg in manager.get_configs():
        role = "Main camera" if cfg["is_main"] else "Side camera"
        self.__add_new_row(table, cfg["name"], role)
    table.clearSelection()
    # force the selection handler to run for the cleared state
    table.itemSelectionChanged.emit()
    self.__last_selected_row = -1
    for preview in (self.image1, self.image2, self.image3):
        preview.imshow(None)
async def __detect_error_on_picture(self, images):
    """Run defect detection on *images*, draw the results, and show the preview."""
    manager = DetectorConfig.instance().get_manager()
    err_task = manager.detect_errors(self.__current_cfg, images, None)
    boxes, scores, classes, valid_detections = await err_task
    err_cfg = self.__current_cfg["err_cfg"]
    helper.draw_yolo_results(images, boxes, scores, classes,
                             err_cfg["classes"], err_cfg["img_size"],
                             min_score=err_cfg["yolo_score_threshold"])
    # scale each image to uint8 for display
    # NOTE(review): assumes float arrays scaled to [0, 1] — confirm against
    # what detect_errors/draw_yolo_results produce
    for i in range(len(images)):
        images[i] *= 255.
        images[i] = np.asarray(images[i], np.uint8)
    final_img = helpers.concat_images(images, self.image3.width(),
                                      self.image3.height())
    self.image3.imshow(final_img)