class TestDetectPairScreen(QWidget):
    __detected_pair = None
    backscreen: Signal
    nextscreen: Signal
    captured = Signal()

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.ui = Ui_test_detect_pair_screen()
        self.__current_cfg = None
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.image2 = ImageWidget()
        self.image3 = ImageWidget()
        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.imageLayout.replaceWidget(self.ui.screen2, self.image2)
        self.imageLayout.replaceWidget(self.ui.screen4, self.image3)
        self.ui.screen1.deleteLater()
        self.ui.screen2.deleteLater()
        self.ui.screen4.deleteLater()

    def showEvent(self, event):
        _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
        self.__detected_pair = None
        self.image3.imshow(None)
        manager = DetectorConfig.instance().get_manager()
        left = manager.get_sample_left()
        right = manager.get_sample_right()
        if left is not None:
            label_w = self.image1.width()
            label_h = self.image1.height()
            images = [left, right]
            self.__detected_pair = images
            final_img = helpers.concat_images(images, label_w, label_h)
            self.image3.imshow(final_img)
        self.__set_btn_next_enabled()
        self.__set_btn_capture_text()
        self.__load_config()

    # binding
    def binding(self):
        self.backscreen = self.ui.btnBack.clicked
        self.nextscreen = self.ui.btnNext.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        self.ui.btnSaveSample.clicked.connect(self.btn_save_sample_clicked)
        self.ui.btnRetakeSample.clicked.connect(self.btn_retake_sample_clicked)

    def btn_capture_clicked(self):
        self.captured.emit()
        self.__set_btn_capture_text()

    def __set_btn_capture_text(self):
        timer_active = DetectorConfig.instance().get_timer().isActive()
        self.ui.btnCapture.setText("CAPTURE" if not timer_active else "STOP")

    def __set_btn_next_enabled(self):
        manager = DetectorConfig.instance().get_manager()
        left = manager.get_sample_left()
        has_sample = left is not None
        self.ui.btnNext.setEnabled(has_sample)

    def view_cam(self, image):
        # read image in BGR format
        label_w = self.image1.width()
        label_h = self.image1.height()
        dim = (label_w, label_h)
        if image is None:
            self.image1.imshow(image)
            self.image2.imshow(image)
            # self.image3.imshow(image)
            return
        contour, detected, detected_pair = self.__process_pair(image.copy())
        img_resized = cv2.resize(image, dim)
        contour_resized = cv2.resize(contour, dim)
        self.image1.imshow(img_resized)
        self.image2.imshow(contour_resized)
        if detected is not None and self.__detected_pair is None:
            self.image3.imshow(detected)
            self.__detected_pair = detected_pair

    def __load_config(self):
        # nothing to load for this screen
        return

    def __process_pair(self, image):
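        # sketch of the pair-detection flow (manager/helper semantics assumed
        # from their usage here): extract candidate boxes, draw each group with
        # its measured size and, when a matched left/right pair is found,
        # mirror the left crop and return the annotated frame plus a
        # concatenated preview of the pair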
        manager = DetectorConfig.instance().get_manager()
        boxes, proc = manager.extract_boxes(self.__current_cfg, image)
        final_grouped, sizes, check_group_idx, pair, split_left, split_right, image_detect = manager.detect_groups_and_checked_pair(
            self.__current_cfg, boxes, image)
        unit = self.__current_cfg["length_unit"]
        for idx, group in enumerate(final_grouped):
            for b_idx, b in enumerate(group):
                c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
                cur_size = sizes[idx][b_idx]
                lH, lW = cur_size
                helper.draw_boxes_and_sizes(image, idx, box, lH, lW, unit, tl,
                                            br)
        if (pair is not None):
            manager.check_group(check_group_idx, final_grouped)
            left, right = pair
            left, right = left[0], right[0]
            left = cv2.flip(left, 1)
            label_w = self.image3.width()
            label_h = self.image3.height()
            images = [left, right]
            final_img = helpers.concat_images(images, label_w, label_h)
            return image, final_img, images
        return image, None, None

    def btn_save_sample_clicked(self):
        if self.__detected_pair is not None:
            left, right = self.__detected_pair
            folder_path = DetectorConfig.instance().get_current_path()
            if folder_path is None:
                helpers.show_message("You must save configuration first")
                return
            cv2.imwrite(os.path.join(folder_path, detector.SAMPLE_LEFT_FILE),
                        left)
            cv2.imwrite(os.path.join(folder_path, detector.SAMPLE_RIGHT_FILE),
                        right)
            DetectorConfig.instance().get_manager().load_sample_images()
            helpers.show_message("Save successfully")
            self.__set_btn_next_enabled()

    def btn_retake_sample_clicked(self):
        self.__detected_pair = None
        manager = DetectorConfig.instance().get_manager()
        manager.reset_samples()
        self.__set_btn_next_enabled()
        self.image3.imshow(None)
class ErrorDetectScreen(QWidget):
    backscreen: Signal
    nextscreen: Signal
    captured = Signal()

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.ui = Ui_ErrorDetectScreen()
        self.__current_cfg = None
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.image2 = ImageWidget()
        self.image3 = ImageWidget()
        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.imageLayout.replaceWidget(self.ui.screen2, self.image2)
        self.imageLayout.replaceWidget(self.ui.screen4, self.image3)
        self.ui.screen1.deleteLater()
        self.ui.screen2.deleteLater()
        self.ui.screen4.deleteLater()

        self.ui.cbbWidth.setPlaceholderText("Width")
        self.ui.cbbHeight.setPlaceholderText("Height")
        self.ui.cbbWidth.setCurrentIndex(-1)
        self.ui.cbbHeight.setCurrentIndex(-1)
        frame_resize_values = [str(32 * i) for i in range(1, 20)]

        self.ui.cbbHeight.clear()
        for value in frame_resize_values:
            self.ui.cbbHeight.addItem(value, userData=int(value))

        self.ui.cbbWidth.clear()
        for value in frame_resize_values:
            self.ui.cbbWidth.addItem(value, userData=int(value))

    def showEvent(self, event):
        _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
        self.__set_btn_capture_text()
        self.__load_config()

    def __load_config(self):
        err_cfg = self.__current_cfg["err_cfg"]
        img_size = err_cfg["img_size"]
        inp_shape = err_cfg["inp_shape"]
        yolo_iou_threshold = err_cfg["yolo_iou_threshold"]
        yolo_max_boxes = err_cfg["yolo_max_boxes"]
        yolo_score_threshold = err_cfg["yolo_score_threshold"]
        weights = err_cfg["weights"]
        classes = err_cfg["classes"]
        num_classes = err_cfg["num_classes"]
        is_defect_enable = self.__current_cfg["is_defect_enable"]

        width = img_size[0]
        height = img_size[1]

        self.ui.cbbWidth.setCurrentText(str(width))
        self.ui.cbbHeight.setCurrentText(str(height))
        self.ui.chkDefectDetection.setChecked(is_defect_enable)

        self.ui.inpModelChoice.setText(weights)
        self.ui.sldIoUThresh.setValue(int(yolo_iou_threshold * 100))
        self.ui.groupIoUThresh.setTitle(f"IoU threshold: {yolo_iou_threshold}")
        self.ui.sldScoreThresh.setValue(int(yolo_score_threshold * 100))
        self.ui.groupScoreThresh.setTitle(
            f"Score threshold: {yolo_score_threshold}")
        self.ui.inpMaxInstances.setValue(yolo_max_boxes)
        self.ui.inpClasses.setText(", ".join(classes))

    # binding
    def binding(self):
        self.backscreen = self.ui.btnBack.clicked
        self.nextscreen = self.ui.btnFinish.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        self.ui.inpMaxInstances.textChanged.connect(
            self.inp_max_instances_changed)
        self.ui.sldScoreThresh.valueChanged.connect(
            self.sld_score_thresh_changed)
        self.ui.sldIoUThresh.valueChanged.connect(
            self.sld_iou_threshold_changed)
        self.ui.cbbHeight.currentIndexChanged.connect(
            self.width_height_changed)
        self.ui.cbbWidth.currentIndexChanged.connect(self.width_height_changed)
        self.ui.btnChooseModel.clicked.connect(self.btn_choose_model_clicked)
        self.ui.btnChooseClasses.clicked.connect(
            self.btn_choose_classes_clicked)
        self.ui.chkDefectDetection.stateChanged.connect(
            self.chk_defect_detection_state_changed)
        self.ui.btnReloadModel.clicked.connect(self.btn_reload_model_clicked)
        self.ui.btnChoosePicture.clicked.connect(
            self.btn_choose_picture_clicked)

    def chk_defect_detection_state_changed(self):
        checked = self.ui.chkDefectDetection.isChecked()
        self.__current_cfg["is_defect_enable"] = checked

    # handlers
    @asyncSlot()
    async def btn_reload_model_clicked(self):
        try:
            await DetectorConfig.instance().get_manager().load_model(
                self.__current_cfg)
            helpers.show_message("Finish reloading model")
        except Exception:
            helpers.show_message("Error reloading model")

    def btn_capture_clicked(self):
        self.captured.emit()
        self.__set_btn_capture_text()

    def __set_btn_capture_text(self):
        timer_active = DetectorConfig.instance().get_timer().isActive()
        self.ui.btnCapture.setText("CAPTURE" if not timer_active else "STOP")
        self.ui.btnChoosePicture.setEnabled(not timer_active)

    def inp_max_instances_changed(self):
        value = self.ui.inpMaxInstances.value()
        if value != self.__current_cfg["err_cfg"]["yolo_max_boxes"]:
            self.__current_cfg["err_cfg"]["yolo_max_boxes"] = value

    def sld_score_thresh_changed(self):
        value = self.ui.sldScoreThresh.value()
        self.ui.groupScoreThresh.setTitle(f"Score threshold: {value/100}")
        self.__current_cfg["err_cfg"]["yolo_score_threshold"] = value / 100

    def sld_iou_threshold_changed(self):
        value = self.ui.sldIoUThresh.value()
        self.ui.groupIoUThresh.setTitle(f"IoU threshold: {value/100}")
        self.__current_cfg["err_cfg"]["yolo_iou_threshold"] = value / 100

    def width_height_changed(self):
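        # note: err_cfg keeps img_size as (width, height) while inp_shape is
        # the model input tensor shape (height, width, channels)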
        height = self.ui.cbbHeight.currentData()
        width = self.ui.cbbWidth.currentData()
        self.__current_cfg["err_cfg"]["img_size"] = (width, height)
        self.__current_cfg["err_cfg"]["inp_shape"] = (height, width, 3)

    def btn_choose_model_clicked(self):
        url, _ = helpers.file_chooser_open_file(self,
                                                f_filter="Keras model (*.h5)")
        if url.isEmpty(): return
        file_name = url.toLocalFile()
        self.ui.inpModelChoice.setText(file_name)
        self.__current_cfg["err_cfg"]["weights"] = file_name

    @asyncSlot()
    async def btn_choose_picture_clicked(self):
        url, _ = helpers.file_chooser_open_file(
            self, f_filter="Images (*.jpg *.png *.bmp)")
        if url.isEmpty(): return
        file_name = url.toLocalFile()
        images = [cv2.imread(file_name)]
        await self.__detect_error_on_picture(images)

    def btn_choose_classes_clicked(self):
        url, _ = helpers.file_chooser_open_file(self, "Text file (*.txt)")
        if url.isEmpty(): return
        file_name = url.toLocalFile()
        classes = []
        with open(file_name) as txt_file:
            classes = txt_file.readlines()
        for idx, cl in enumerate(classes):
            classes[idx] = cl.replace("\n", "")
        self.__current_cfg["err_cfg"]["classes"] = classes
        self.__current_cfg["err_cfg"]["num_classes"] = len(classes)
        self.ui.inpClasses.setText(", ".join(classes))

    def view_cam(self, image):
        # read image in BGR format
        label_w = self.image1.width()
        label_h = self.image1.height()
        dim = (label_w, label_h)
        if image is None:
            self.image1.imshow(image)
            self.image2.imshow(image)
            self.image3.imshow(image)
            return
        orig = cv2.resize(image, dim)
        self.image1.imshow(orig)
        contour = self.__process_image(image)
        contour = cv2.resize(contour, dim)
        self.image2.imshow(contour)

    async def __detect_error(self, images):
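        # detect defects on the already-cropped pair and draw the YOLO results;
        # the crops are assumed to be normalized floats here, hence the *255
        # rescale back to uint8 before display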
        manager = DetectorConfig.instance().get_manager()
        err_task = manager.detect_errors(self.__current_cfg, images, None)
        boxes, scores, classes, valid_detections = await err_task
        err_cfg = self.__current_cfg["err_cfg"]
        helper.draw_yolo_results(images,
                                 boxes,
                                 scores,
                                 classes,
                                 err_cfg["classes"],
                                 err_cfg["img_size"],
                                 min_score=err_cfg["yolo_score_threshold"])

        label_w = self.image3.width()
        label_h = self.image3.height()
        for idx, img in enumerate(images):
            images[idx] *= 255.
            images[idx] = np.asarray(images[idx], np.uint8)
        final_img = helpers.concat_images(images, label_w, label_h)
        self.image3.imshow(final_img)

    async def __detect_error_on_picture(self, images):
        manager = DetectorConfig.instance().get_manager()
        err_task = manager.detect_errors(self.__current_cfg, images, None)
        boxes, scores, classes, valid_detections = await err_task
        err_cfg = self.__current_cfg["err_cfg"]
        helper.draw_yolo_results(images,
                                 boxes,
                                 scores,
                                 classes,
                                 err_cfg["classes"],
                                 err_cfg["img_size"],
                                 min_score=err_cfg["yolo_score_threshold"])
        label_w = self.image3.width()
        label_h = self.image3.height()
        for idx, img in enumerate(images):
            images[idx] *= 255.
            images[idx] = np.asarray(images[idx], np.uint8)
        final_img = helpers.concat_images(images, label_w, label_h)
        self.image3.imshow(final_img)

    def __process_pair(self, image):
        manager = DetectorConfig.instance().get_manager()
        boxes, proc = manager.extract_boxes(self.__current_cfg, image)
        final_grouped, sizes, check_group_idx, pair, split_left, split_right, image_detect = manager.detect_groups_and_checked_pair(
            self.__current_cfg, boxes, image)
        unit = self.__current_cfg["length_unit"]
        for idx, group in enumerate(final_grouped):
            for b_idx, b in enumerate(group):
                c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
                cur_size = sizes[idx][b_idx]
                lH, lW = cur_size
                helper.draw_boxes_and_sizes(image, idx, box, lH, lW, unit, tl,
                                            br)
        if (pair is not None):
            manager.check_group(check_group_idx, final_grouped)
            left, right = pair
            left, right = left[0], right[0]
            left = cv2.flip(left, 1)
            return image, [left, right]
        return image, None

    def __process_image(self, image):
        manager = DetectorConfig.instance().get_manager()
        contour, detected_pair = self.__process_pair(image)
        if detected_pair is not None and manager.get_model() is not None:
            runnable = WorkerRunnable(self.__detect_error,
                                      detected_pair,
                                      parent=self)
            runnable.work_error.connect(lambda ex: print(ex))
            QThreadPool.globalInstance().start(runnable)
        return contour
class MeasurementScreen(QWidget):
    backscreen: Signal
    nextscreen: Signal
    captured = Signal()
    __actual_length_edited = False

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.__current_cfg = None
        self.ui = Ui_MeasurementScreen()
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.image2 = ImageWidget()
        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.imageLayout.replaceWidget(self.ui.screen2, self.image2)
        self.ui.screen1.deleteLater()
        self.ui.screen2.deleteLater()

    def showEvent(self, event):
        _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
        self.__set_btn_capture_text()
        self.__load_config()

    # binding
    def binding(self):
        self.backscreen = self.ui.btnBack.clicked
        self.nextscreen = self.ui.btnNext.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        self.ui.sldMaximumHeight.valueChanged.connect(
            self.sld_min_height_change)
        self.ui.sldMaximumWidth.valueChanged.connect(self.sld_min_width_change)
        self.ui.sldDectectPosition.valueChanged.connect(
            self.sld_position_change)
        self.ui.sldDetectRange.valueChanged.connect(
            self.sld_detect_range_change)
        self.ui.btnEditActualLength.clicked.connect(
            self.btn_edit_actual_length_clicked)
        self.ui.inpAllowDiff.textChanged.connect(self.inp_allow_diff_changed)
        self.ui.inpLengthUnit.textChanged.connect(self.inp_length_unit_change)
        self.ui.chkMainCamera.stateChanged.connect(
            self.chk_main_camera_state_changed)

    def btn_capture_clicked(self):
        self.captured.emit()
        self.__set_btn_capture_text()

    def __set_btn_capture_text(self):
        timer_active = DetectorConfig.instance().get_timer().isActive()
        self.ui.btnCapture.setText("CAPTURE" if not timer_active else "STOP")

    def chk_main_camera_state_changed(self):
        is_checked = self.ui.chkMainCamera.isChecked()
        is_main = self.__current_cfg["is_main"]
        if is_main != is_checked and self.__actual_length_edited:
            helpers.show_message("You must save actual length first")
            self.ui.chkMainCamera.setCheckState(
                Qt.CheckState.Checked if not is_checked else Qt.CheckState.Unchecked)
            return
        elif is_main == is_checked:
            return
        DetectorConfig.instance().get_manager().set_main_config(
            self.__current_cfg["name"] if is_checked else None)
        self.__change_ui_based_on_is_main()

    def __change_ui_based_on_is_main(self):
        is_main = self.__current_cfg["is_main"]
        self.ui.sldDectectPosition.setEnabled(is_main)
        self.ui.inpAllowDiff.setEnabled(is_main)
        self.ui.btnEditActualLength.setEnabled(is_main)
        self.ui.inpLengthUnit.setEnabled(is_main)

    def sld_min_width_change(self):
        value = self.ui.sldMaximumWidth.value()
        self.__current_cfg["min_width_per"] = value / 100
        self.ui.groupSliderWidth.setTitle("Minimum width (%): " + str(value))

    def sld_min_height_change(self):
        value = self.ui.sldMaximumHeight.value()
        self.__current_cfg["min_height_per"] = value / 100
        self.ui.groupSliderHeight.setTitle("Minimum height (%): " + str(value))

    def sld_position_change(self):
        value = self.ui.sldDectectPosition.value()
        self.__current_cfg["stop_condition"] = -value
        self.ui.groupSliderPosition.setTitle("Detect position: " + str(-value))

    def inp_length_unit_change(self):
        value = str(self.ui.inpLengthUnit.text())
        self.ui.groupInputAllowDiff.setTitle(f"Allow Difference ({value})")
        self.__current_cfg["length_unit"] = value

    def sld_detect_range_change(self):
        value = self.ui.sldDetectRange.value() / 100
        self.__current_cfg["detect_range"] = (value, 1 - value)
        self.ui.groupSliderDetectRange.setTitle("Detect range: " + str(value))

    def btn_edit_actual_length_clicked(self, event):
        if self.__actual_length_edited:
            value = float(self.ui.inpLeftActualLength.text())
            total_px = float(self.ui.inpLeftDetectedLength.text())
            if (total_px is not None and total_px > 0):
                self.__current_cfg["length_per_10px"] = 0 if (
                    value is None
                    or value == 0) else helper.calculate_length_per10px(
                        total_px, value)
            self.ui.inpLeftActualLength.setEnabled(False)
            self.ui.btnEditActualLength.setText("Edit")
        else:
            self.ui.inpLeftActualLength.setEnabled(True)
            self.ui.btnEditActualLength.setText("Save")
        self.__actual_length_edited = not self.__actual_length_edited

    def inp_allow_diff_changed(self, text):
        value = float(self.ui.inpAllowDiff.text())
        self.__current_cfg["max_size_diff"] = value

    # view camera
    def view_cam(self, image):
        # read image in BGR format
        label_w = self.image1.width()
        label_h = self.image1.height()
        if image is None:
            self.image1.imshow(image)
            self.image2.imshow(image)
            return
        orig = image.copy()
        orig = self.__draw_rectangle_on_image(orig)
        orig = self.__draw_position_line_on_image(orig)
        orig = self.__draw_detect_range(orig)
        dim = (label_w, label_h)
        contour, sizes = self.__process_image(image.copy())
        img_resized = cv2.resize(orig, dim)
        contour_resized = cv2.resize(contour, dim)
        left_length, actual_length = 0, 0
        if len(sizes) > 0:
            length_per_10px = self.__current_cfg["length_per_10px"]
            left_length = sizes[0][0]
            actual_length = 0 if length_per_10px is None or length_per_10px == 0 else helper.calculate_length(
                left_length, length_per_10px)
        self.ui.inpLeftDetectedLength.setText(f"{left_length:.2f}")
        if not self.__actual_length_edited:
            self.ui.inpLeftActualLength.setValue(actual_length)

        self.image1.imshow(img_resized)
        self.image2.imshow(contour_resized)

    # draw functions
    def __draw_rectangle_on_image(self, image):
        image_width = image.shape[1]
        image_height = image.shape[0]

        height_value = self.ui.sldMaximumHeight.value()
        width_value = self.ui.sldMaximumWidth.value()

        rect_width = int(image_width * width_value / 100)
        rect_height = int(image_height * height_value / 100)
        # draw a red (0, 0, 255 in BGR) rectangle onto the image
        return cv2.rectangle(image, (0, 0), (rect_width, rect_height),
                             (0, 0, 255), 3)

    def __draw_position_line_on_image(self, image):
        value = self.ui.sldDectectPosition.value()
        image_width = image.shape[1]
        image_height = image.shape[0]

        position = int(image_width / 2 + value)

        return cv2.line(image, (position, 0), (position, image_height),
                        (255, 0, 0), 3)

    def __draw_detect_range(self, image):
        # convert the slider value (0 - 50) to a ratio of 0.0 - 0.5 (step 0.01)
        image_width = image.shape[1]
        image_height = image.shape[0]

        value = self.ui.sldDetectRange.value() / 100

        left_line_ratio = value
        right_line_ratio = 1 - value

        left_line_point = int(left_line_ratio * image_width)
        right_line_point = int(right_line_ratio * image_width)
        self.__current_cfg["detect_range"] = (value, 1 - value)

        image = cv2.line(image, (left_line_point, 0),
                         (left_line_point, image_height), (0, 255, 0), 3)
        image = cv2.line(image, (right_line_point, 0),
                         (right_line_point, image_height), (0, 255, 0), 3)
        return image

    def __process_image(self, image):
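        # measure every extracted box; pixel sizes are converted to physical
        # units only when a length-per-10px calibration value is available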
        manager = DetectorConfig.instance().get_manager()
        boxes, proc = manager.extract_boxes(self.__current_cfg, image)
        sizes = []
        for idx, b in enumerate(boxes):
            c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
            sizes.append((dimA, dimB))
            length_per_10px = self.__current_cfg["length_per_10px"]
            unit = self.__current_cfg["length_unit"]
            if length_per_10px is not None and length_per_10px != 0:
                dimA, dimB = helper.calculate_length(
                    dimA, length_per_10px), helper.calculate_length(
                        dimB, length_per_10px)
            helper.draw_boxes_and_sizes(image, None, box, dimA, dimB, unit, tl,
                                        br)
        return image, sizes

    def __load_config(self):
        min_width = self.__current_cfg["min_width_per"] * 100
        min_height = self.__current_cfg["min_height_per"] * 100
        length_unit = self.__current_cfg["length_unit"]
        stop_condition = self.__current_cfg["stop_condition"]
        detect_range = self.__current_cfg["detect_range"][0] * 100
        max_size_diff = self.__current_cfg["max_size_diff"]
        frame_width = self.__current_cfg["frame_width"]

        self.ui.sldMaximumWidth.setValue(min_width)
        self.ui.sldMaximumHeight.setValue(min_height)
        self.ui.inpLengthUnit.setText(length_unit)
        self.ui.groupInputAllowDiff.setTitle(
            f"Allow Difference ({length_unit})")
        self.ui.sldDectectPosition.setMinimum(-int(frame_width / 2))
        self.ui.sldDectectPosition.setMaximum(int(frame_width / 2))
        self.ui.sldDectectPosition.setValue(-stop_condition)
        self.ui.sldDetectRange.setValue(detect_range)
        self.ui.inpAllowDiff.setValue(max_size_diff)
        self.ui.chkMainCamera.setChecked(self.__current_cfg["is_main"])
        self.ui.inpLeftDetectedLength.setText(str(0))
        self.ui.inpLeftActualLength.setValue(0)
        self.ui.inpLeftActualLength.setEnabled(False)
        self.ui.btnEditActualLength.setText("Edit")
        self.__actual_length_edited = False
        self.__change_ui_based_on_is_main()
class ColorParamCalibrationScreen(QWidget):
    __detected_pair = None
    backscreen: Signal
    nextscreen: Signal
    captured = Signal()
    __max_blue = 0
    __max_green = 0
    __max_red = 0
    __amp_thresh_edited = False

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.__current_cfg = None
        self.ui = Ui_ColorParamCalibScreen()
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.ui.screen1.deleteLater()

        self.image_detect_left = ImageWidget()
        self.image_detect_right = ImageWidget()
        self.image_sample_left = ImageWidget()
        self.image_sample_right = ImageWidget()
        self.screen2_layout = self.ui.screen2.layout()
        self.screen2_layout.replaceWidget(self.ui.screen2Left,
                                          self.image_detect_left)
        self.screen2_layout.replaceWidget(self.ui.screen2Right,
                                          self.image_detect_right)
        self.ui.screen2Left.deleteLater()
        self.ui.screen2Right.deleteLater()

        self.screen3_layout = self.ui.screen3.layout()
        self.screen3_layout.replaceWidget(self.ui.screen3Left,
                                          self.image_sample_left)
        self.screen3_layout.replaceWidget(self.ui.screen3Right,
                                          self.image_sample_right)
        self.ui.screen3Left.deleteLater()
        self.ui.screen3Right.deleteLater()

        self.image_detect_left.ui.lblImage.setAlignment(Qt.AlignCenter)
        self.image_detect_right.ui.lblImage.setAlignment(Qt.AlignCenter)
        self.image_sample_left.ui.lblImage.setAlignment(Qt.AlignCenter)
        self.image_sample_right.ui.lblImage.setAlignment(Qt.AlignCenter)
        return

    def showEvent(self, event):
        _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
        self.ui.inpResult.setHtml("<b>RESULT</b>")
        self.__view_image_sample()
        self.__set_btn_capture_text()
        self.__load_config()

    # binding
    def binding(self):
        self.backscreen = self.ui.btnBack.clicked
        self.nextscreen = self.ui.btnNext.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        self.ui.inpSuppThreshold.textChanged.connect(
            self.inp_sup_thresh_change)
        self.ui.sldAllowDiff.valueChanged.connect(self.sld_allow_diff_change)
        self.ui.sldAmpRate.valueChanged.connect(self.sld_amp_rate_change)
        self.ui.ampThreshBlue.textChanged.connect(self.amp_threshold_change)
        self.ui.ampThreshGreen.textChanged.connect(self.amp_threshold_change)
        self.ui.ampThreshRed.textChanged.connect(self.amp_threshold_change)
        self.ui.chkColorCompare.stateChanged.connect(
            self.chk_color_enabled_state_changed)
        self.ui.btnEditAmpThresh.clicked.connect(
            self.btn_edit_amp_thresh_clicked)

    def btn_capture_clicked(self):
        self.captured.emit()
        self.__set_btn_capture_text()

    def __set_btn_capture_text(self):
        timer_active = DetectorConfig.instance().get_timer().isActive()
        self.ui.btnCapture.setText("CAPTURE" if not timer_active else "STOP")

    def amp_threshold_change(self):
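        # amplify_thresh is stored as a (blue, green, red) tuple, i.e. BGR
        # channel order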
        amp_thresh_green_value = float(self.ui.ampThreshGreen.text())
        amp_thresh_red_value = float(self.ui.ampThreshRed.text())
        amp_thresh_blue_value = float(self.ui.ampThreshBlue.text())
        self.__current_cfg["color_cfg"]["amplify_thresh"] = (
            amp_thresh_blue_value, amp_thresh_green_value,
            amp_thresh_red_value)

    def sld_amp_rate_change(self):
        value = self.ui.sldAmpRate.value()
        self.ui.grpSldAmpRate.setTitle("Amplification Rate: " + str(value))
        self.__current_cfg["color_cfg"]["amplify_rate"] = value

    def inp_sup_thresh_change(self):
        value = self.ui.inpSuppThreshold.value()
        self.__current_cfg["color_cfg"]["supp_thresh"] = value

    def sld_allow_diff_change(self):
        value = self.ui.sldAllowDiff.value()
        self.ui.grpSldAllowDiff.setTitle("Allowed Difference (%): " +
                                         str(value))
        self.__current_cfg["color_cfg"]["max_diff"] = value / 100

    def chk_color_enabled_state_changed(self):
        checked = self.ui.chkColorCompare.isChecked()
        self.__current_cfg["is_color_enable"] = checked

    def __load_config(self):
        color_cfg = self.__current_cfg["color_cfg"]
        amplify_rate = color_cfg["amplify_rate"]
        supp_thresh = color_cfg["supp_thresh"]
        max_diff = color_cfg["max_diff"]
        is_color_enable = self.__current_cfg["is_color_enable"]
        amp_thresh = color_cfg["amplify_thresh"]

        self.ui.sldAllowDiff.setValue(max_diff * 100)
        self.ui.sldAmpRate.setValue(amplify_rate)
        self.ui.inpSuppThreshold.setValue(supp_thresh)
        self.ui.ampThreshBlue.setValue(amp_thresh[0])
        self.ui.ampThreshGreen.setValue(amp_thresh[1])
        self.ui.ampThreshRed.setValue(amp_thresh[2])

        self.ui.grpSldAmpRate.setTitle("Amplification Rate: " +
                                       str(amplify_rate))
        self.ui.grpSldAllowDiff.setTitle("Allowed Difference (%): " +
                                         str(max_diff * 100))
        self.ui.chkColorCompare.setChecked(is_color_enable)

        self.ui.btnEditAmpThresh.setText("Edit")
        self.ui.ampThreshRed.setEnabled(False)
        self.ui.ampThreshGreen.setEnabled(False)
        self.ui.ampThreshBlue.setEnabled(False)

    def __view_image_sample(self):
        manager = DetectorConfig.instance().get_manager()
        left = manager.get_sample_left()
        right = manager.get_sample_right()
        m_left, m_right = self.__preprocess_color(left, right)
        label_h = self.ui.screen2.height()
        img_size = (156, label_h - 30)
        m_left = cv2.resize(m_left, img_size, interpolation=cv2.INTER_AREA)
        m_right = cv2.resize(m_right, img_size, interpolation=cv2.INTER_AREA)
        self.image_sample_left.imshow(m_left)
        self.image_sample_right.imshow(m_right)

    def view_cam(self, image):
        # read image in BGR format
        label_w = self.image1.width()
        label_h = self.image1.height()
        dim = (label_w, label_h)
        if image is None:
            self.image1.imshow(image)
            self.image_detect_left.imshow(image)
            self.image_detect_right.imshow(image)
            return
        img_size = (156, label_h - 30)
        contour, detected, detected_pair = self.__process_pair(image.copy())
        contour_resized = cv2.resize(contour, dim)
        self.image1.imshow(contour_resized)
        if detected_pair is not None:
            left, right = self.__preprocess_color(detected_pair[0],
                                                  detected_pair[1])
            trio.run(self.__find_amp_threshold, left, right)
            left = cv2.resize(left, img_size)
            right = cv2.resize(right, img_size)
            self.image_detect_left.imshow(left)
            self.image_detect_right.imshow(right)
            self.__detected_pair = detected_pair

    async def __find_amp_threshold(self, img_left, img_right):
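        # compare the detected pair against the stored samples; while the
        # threshold is in edit mode the running per-channel maxima are written
        # back as the amplify threshold, and the PASS/FAIL verdict is shown
        # only when not editing (compare_colors return values are read from
        # their usage here)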
        manager = DetectorConfig.instance().get_manager()
        sample_left = manager.get_sample_left()
        sample_right = manager.get_sample_right()
        sample_left, sample_right = self.__preprocess_color(
            sample_left, sample_right)

        left_task, right_task = await manager.compare_colors(
            self.__current_cfg, img_left, img_right, sample_left, sample_right,
            not self.__amp_thresh_edited, None)
        _, avg_diff_l, left_hist, is_diff_l = left_task
        _, avg_diff_r, right_hist, is_diff_r = right_task
        blue = max(left_hist[0], right_hist[0])
        green = max(left_hist[1], right_hist[1])
        red = max(left_hist[2], right_hist[2])
        max_blue = max(blue, self.__max_blue)
        max_green = max(green, self.__max_green)
        max_red = max(red, self.__max_red)
        amp_thresh = (float(max_blue), float(max_green), float(max_red))
        if self.__amp_thresh_edited:
            self.__max_blue = max_blue
            self.__max_red = max_red
            self.__max_green = max_green
            self.ui.ampThreshBlue.setValue(amp_thresh[0])
            self.ui.ampThreshGreen.setValue(amp_thresh[1])
            self.ui.ampThreshRed.setValue(amp_thresh[2])
            self.__current_cfg["color_cfg"]["amplify_thresh"] = amp_thresh

        # result display
        cur = datetime.datetime.now()
        cur_date_str = cur.strftime(ISO_DATE_FORMAT)
        result_text = f"<b>RESULT</b><br/>" + f"<b>Time</b>: {cur_date_str}<br/>"
        avg_diff_l *= 100
        avg_diff_r *= 100
        b, g, r = amp_thresh
        result_text += f"<b>Current difference value</b>: {b:.2f}, {g:.2f}, {r:.2f}<br/>"
        result_text += f"<b>Left difference</b>: {avg_diff_l:.2f}%<br/>"
        result_text += f"<b>Right difference</b>: {avg_diff_r:.2f}%<br/>"
        if not self.__amp_thresh_edited:
            left_result_text = "PASSED" if not is_diff_l else "FAILED"
            right_result_text = "PASSED" if not is_diff_r else "FAILED"
            result_text += f"<b>Result</b>: Left {left_result_text} - Right {right_result_text}"
        self.ui.inpResult.setHtml(result_text)

    def btn_edit_amp_thresh_clicked(self, event):
        if self.__amp_thresh_edited:
            self.ui.ampThreshRed.setEnabled(False)
            self.ui.ampThreshGreen.setEnabled(False)
            self.ui.ampThreshBlue.setEnabled(False)
            self.ui.btnEditAmpThresh.setText("Edit")
            self.__amp_thresh_edited = False
        else:
            self.ui.ampThreshRed.setEnabled(True)
            self.ui.ampThreshGreen.setEnabled(True)
            self.ui.ampThreshBlue.setEnabled(True)
            self.ui.btnEditAmpThresh.setText("Save")
            self.__amp_thresh_edited = True

    # image process functions
    def __preprocess_color(self, sample_left, sample_right):
        manager = DetectorConfig.instance().get_manager()
        pre_sample_left = manager.preprocess(self.__current_cfg, sample_left,
                                             True)
        pre_sample_right = manager.preprocess(self.__current_cfg, sample_right,
                                              False)
        return pre_sample_left, pre_sample_right

    def __process_pair(self, image):
        manager = DetectorConfig.instance().get_manager()
        boxes, proc = manager.extract_boxes(self.__current_cfg, image)
        final_grouped, sizes, check_group_idx, pair, split_left, split_right, image_detect = manager.detect_groups_and_checked_pair(
            self.__current_cfg, boxes, image)
        unit = self.__current_cfg["length_unit"]
        for idx, group in enumerate(final_grouped):
            for b_idx, b in enumerate(group):
                c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
                cur_size = sizes[idx][b_idx]
                lH, lW = cur_size
                helper.draw_boxes_and_sizes(image, idx, box, lH, lW, unit, tl,
                                            br)
        if (pair is not None):
            manager.check_group(check_group_idx, final_grouped)
            left, right = pair
            left, right = left[0], right[0]
            left = cv2.flip(left, 1)
            label_w = self.image1.width()
            label_h = self.image1.height()
            images = [left, right]
            self.__detected_pair = images
            final_img = helpers.concat_images(images, label_w, label_h)
            return image, final_img, images
        return image, None, None
class AsymConfigScreen(QWidget):
    MIN_SIMILARITY_STEP = 0.01
    backscreen: Signal
    nextscreen: Signal
    captured = Signal()

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.__current_cfg = None
        self.ui = Ui_AsymConfigScreen()
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.ui.screen1.deleteLater()

        self.image_detect_left = ImageWidget()
        self.image_detect_right = ImageWidget()
        self.image_sample_left = ImageWidget()
        self.image_sample_right = ImageWidget()

        self.screen2_layout = self.ui.screen2.layout()
        self.screen2_layout.replaceWidget(self.ui.screen2Left,
                                          self.image_detect_left)
        self.screen2_layout.replaceWidget(self.ui.screen2Right,
                                          self.image_detect_right)
        self.ui.screen2Left.deleteLater()
        self.ui.screen2Right.deleteLater()

        self.screen3_layout = self.ui.screen3.layout()
        self.screen3_layout.replaceWidget(self.ui.screen3Left,
                                          self.image_sample_left)
        self.screen3_layout.replaceWidget(self.ui.screen3Right,
                                          self.image_sample_right)
        self.ui.screen3Left.deleteLater()
        self.ui.screen3Right.deleteLater()

        self.image_detect_left.ui.lblImage.setAlignment(Qt.AlignCenter)
        self.image_detect_right.ui.lblImage.setAlignment(Qt.AlignCenter)
        self.image_sample_left.ui.lblImage.setAlignment(Qt.AlignCenter)
        self.image_sample_right.ui.lblImage.setAlignment(Qt.AlignCenter)

    # binding
    def binding(self):
        self.backscreen = self.ui.btnBack.clicked
        self.nextscreen = self.ui.btnNext.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        self.ui.sldAmpRate.valueChanged.connect(
            self.sld_amplification_rate_changed)
        self.ui.sldMinSimilarity.valueChanged.connect(
            self.sld_min_similarity_changed)
        self.ui.inpReCalcFactorRight.textChanged.connect(
            self.inp_re_calc_right_changed)
        self.ui.inpReCalcFactorLeft.textChanged.connect(
            self.inp_re_calc_left_changed)
        self.ui.inpAmpThresh.textChanged.connect(
            self.inp_asym_amp_thresh_changed)
        self.ui.inpC1.textChanged.connect(self.inp_C1_changed)
        self.ui.inpC2.textChanged.connect(self.inp_C2_changed)
        self.ui.inpPSNR.textChanged.connect(self.inp_PSNR_changed)
        self.ui.cbbSegments.currentIndexChanged.connect(
            self.cbb_segments_current_index_changed)

    # handlers
    def btn_capture_clicked(self):
        self.captured.emit()
        self.__set_btn_capture_text()

    def __set_btn_capture_text(self):
        timer_active = DetectorConfig.instance().get_timer().isActive()
        self.ui.btnCapture.setText("CAPTURE" if not timer_active else "STOP")

    def sld_amplification_rate_changed(self):
        value = self.ui.sldAmpRate.value()
        self.__current_cfg["sim_cfg"]["asym_amp_rate"] = value
        self.ui.grpBoxAmpRate.setTitle("Amplification rate: " + str(value))

    def sld_min_similarity_changed(self):
        value = round(
            self.ui.sldMinSimilarity.value() * self.MIN_SIMILARITY_STEP, 2)
        self.__current_cfg["sim_cfg"]["min_similarity"] = value
        self.ui.grpBoxMinSimilarity.setTitle("Minimum similarity (%): " +
                                             str(value))

    def showEvent(self, event):
        _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
        self.ui.inpResult.setHtml("<b>RESULT</b>")
        self.__set_btn_capture_text()
        self.__view_image_sample()
        self.__load_config()

    def cbb_segments_current_index_changed(self):
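        # the combo box holds the segment list as comma-separated text, so wrap
        # it in brackets to parse it back into a list with json.loads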
        selected = "[" + self.ui.cbbSegments.currentText() + "]"
        segments = json.loads(selected)
        self.__current_cfg["sim_cfg"]["segments_list"] = segments

    def __load_config(self):
        sim_cfg = self.__current_cfg["sim_cfg"]
        c1 = sim_cfg["C1"]
        c2 = sim_cfg["C2"]
        psnr = sim_cfg["psnr_trigger"]
        amp_thresh = sim_cfg["asym_amp_thresh"]
        amp_rate = sim_cfg["asym_amp_rate"]
        min_similarity = sim_cfg["min_similarity"]
        re_calc_factor_left = sim_cfg["re_calc_factor_left"]
        re_calc_factor_right = sim_cfg["re_calc_factor_right"]
        segments_list = sim_cfg["segments_list"]
        segments_str = str(segments_list).replace('[', '').replace(']', '')
        #---------------------------------------#
        min_similarity_slider_val = round(
            min_similarity / self.MIN_SIMILARITY_STEP, 0)
        if (amp_thresh is None): amp_thresh = 1
        sim_cfg["asym_amp_thresh"] = amp_thresh
        #---------------------------------------#
        self.ui.inpC1.setValue(c1)
        self.ui.inpC2.setValue(c2)
        self.ui.inpPSNR.setValue(float(psnr))
        self.ui.inpAmpThresh.setValue(float(amp_thresh))
        self.ui.inpReCalcFactorLeft.setValue(float(re_calc_factor_left))
        self.ui.inpReCalcFactorRight.setValue(float(re_calc_factor_right))
        self.ui.cbbSegments.setCurrentText(segments_str)
        self.ui.sldMinSimilarity.setValue(int(min_similarity_slider_val))
        self.ui.grpBoxMinSimilarity.setTitle("Minimum similarity (%): " +
                                             str(min_similarity))
        self.ui.sldAmpRate.setValue(int(amp_rate))
        self.ui.grpBoxAmpRate.setTitle("Amplification rate: " + str(amp_rate))

    def __view_image_sample(self):
        manager = DetectorConfig.instance().get_manager()
        left = manager.get_sample_left()
        right = manager.get_sample_right()
        self.__sample_left, self.__sample_right = self.__preprocess_color(
            left, right)
        label_h = self.image1.height()
        img_size = (156, label_h - 30)
        m_left = cv2.resize(self.__sample_left,
                            img_size,
                            interpolation=cv2.INTER_AREA)
        m_right = cv2.resize(self.__sample_right,
                             img_size,
                             interpolation=cv2.INTER_AREA)
        self.image_sample_left.imshow(m_left)
        self.image_sample_right.imshow(m_right)

    def inp_re_calc_left_changed(self):
        value = self.ui.inpReCalcFactorLeft.value()
        self.__current_cfg["sim_cfg"]["re_calc_factor_left"] = value
        return

    def inp_re_calc_right_changed(self):
        value = self.ui.inpReCalcFactorRight.value()
        self.__current_cfg["sim_cfg"]["re_calc_factor_right"] = value
        return

    def inp_asym_amp_thresh_changed(self):
        value = self.ui.inpAmpThresh.value()
        self.__current_cfg["sim_cfg"]["asym_amp_thresh"] = value
        return

    def inp_C1_changed(self):
        value = self.ui.inpC1.value()
        self.__current_cfg["sim_cfg"]["C1"] = value
        return

    def inp_C2_changed(self):
        value = self.ui.inpC2.value()
        self.__current_cfg["sim_cfg"]["C2"] = value
        return

    def inp_PSNR_changed(self):
        value = self.ui.inpPSNR.value()
        self.__current_cfg["sim_cfg"]["psnr_trigger"] = value
        return

    def view_cam(self, image):
        # read image in BGR format
        label_w = self.image1.width()
        label_h = self.image1.height()
        dim = (label_w, label_h)
        if image is None:
            self.image1.imshow(image)
            self.image_detect_left.imshow(image)
            self.image_detect_right.imshow(image)
            return
        img_size = (156, label_h - 30)
        contour, detected, detected_pair = self.__process_pair(image.copy())
        contour_resized = cv2.resize(contour, dim)
        self.image1.imshow(contour_resized)
        if detected_pair is not None:
            left, right = self.__preprocess_color(detected_pair[0],
                                                  detected_pair[1])
            trio.run(self.__detect_asym_diff, left, right)
            left = cv2.resize(left, img_size)
            right = cv2.resize(right, img_size)
            self.image_detect_left.imshow(left)
            self.image_detect_right.imshow(right)

    async def __detect_asym_diff(self, left, right):
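        # compare each side against its preprocessed sample, keep the highest
        # re-calculation factor observed so far and report the similarity
        # results (detect_asym return values are read from their usage here)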
        sim_cfg = self.__current_cfg["sim_cfg"]
        min_sim = sim_cfg['min_similarity']
        manager = DetectorConfig.instance().get_manager()
        left_result, right_result = await manager.detect_asym(
            self.__current_cfg, left, right, self.__sample_left,
            self.__sample_right, None)
        is_asym_diff_left, avg_asym_left, avg_amp_left, recalc_left, res_list_l, amp_res_list_l = left_result
        is_asym_diff_right, avg_asym_right, avg_amp_right, recalc_right, res_list_r, amp_res_list_r = right_result

        # calculate calc_factor both side and then keep the highest only
        tmp_re_calc_factor_left = avg_asym_left / avg_amp_left
        tmp_re_calc_factor_right = avg_asym_right / avg_amp_right
        if (tmp_re_calc_factor_left > sim_cfg['re_calc_factor_left']):
            sim_cfg['re_calc_factor_left'] = tmp_re_calc_factor_left
        if (tmp_re_calc_factor_right > sim_cfg['re_calc_factor_right']):
            sim_cfg['re_calc_factor_right'] = tmp_re_calc_factor_right

        # update result to screen
        self.ui.inpReCalcFactorLeft.setValue(sim_cfg["re_calc_factor_left"])
        self.ui.inpReCalcFactorRight.setValue(sim_cfg["re_calc_factor_right"])

        # result display
        cur = datetime.datetime.now()
        cur_date_str = cur.strftime(ISO_DATE_FORMAT)
        left_result_text = "PASSED" if not is_asym_diff_left else "FAILED"
        right_result_text = "PASSED" if not is_asym_diff_right else "FAILED"
        result_text = f"<b>RESULT</b><br/>" + f"<b>Time</b>: {cur_date_str}<br/>"
        result_text += f"<b>Original similarity</b>: Left {avg_asym_left:.2f} - Right {avg_asym_right:.2f}<br/>"
        result_text += f"<b>Final similarity</b>: Left {recalc_left:.2f} - Right {recalc_right:.2f}<br/>"
        result_text += f"<b>Final result</b>: Left {left_result_text} - Right {right_result_text}"
        self.ui.inpResult.setHtml(result_text)

    def __preprocess_color(self, sample_left, sample_right):
        manager = DetectorConfig.instance().get_manager()
        pre_sample_left = manager.preprocess(self.__current_cfg, sample_left,
                                             True)
        pre_sample_right = manager.preprocess(self.__current_cfg, sample_right,
                                              False)
        return pre_sample_left, pre_sample_right

    def __process_pair(self, image):
        manager = DetectorConfig.instance().get_manager()
        boxes, proc = manager.extract_boxes(self.__current_cfg, image)
        final_grouped, sizes, check_group_idx, pair, split_left, split_right, image_detect = manager.detect_groups_and_checked_pair(
            self.__current_cfg, boxes, image)
        unit = self.__current_cfg["length_unit"]
        for idx, group in enumerate(final_grouped):
            for b_idx, b in enumerate(group):
                c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
                cur_size = sizes[idx][b_idx]
                lH, lW = cur_size
                helper.draw_boxes_and_sizes(image, idx, box, lH, lW, unit, tl,
                                            br)
        if pair is not None:
            manager.check_group(check_group_idx, final_grouped)
            left, right = pair
            left, right = left[0], right[0]
            left = cv2.flip(left, 1)
            label_w = self.image1.width()
            label_h = self.image1.height()
            images = [left, right]
            self.__detected_pair = images
            final_img = helpers.concat_images(images, label_w, label_h)
            return image, final_img, images
        return image, None, None
class ProgressScreen(QWidget):
    return_home = Signal()
    __result_html_changed = Signal(str)
    __sample_left = None
    __sample_right = None
    __pre_sample_left = None
    __pre_sample_right = None
    __last_display_type = None
    __capturing = True

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.__camera_timer = QTimer()
        self.__storage_path = AppConfig.instance().config["storage_path"]
        self.__api_url = AppConfig.instance().config["api_url"]
        self.ui = Ui_ProgressScreen()
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.side_result_image = ImageWidget()
        self.left_detected_image = ImageWidget()
        self.right_detected_image = ImageWidget()
        self.left_sample_image = ImageWidget()
        self.right_sample_image = ImageWidget()

        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.ui.screen1.deleteLater()

        self.mainCamLayout = self.ui.mainResult.layout()
        self.mainCamLayout.replaceWidget(self.ui.detected_L,
                                         self.left_detected_image)
        self.mainCamLayout.replaceWidget(self.ui.detected_R,
                                         self.right_detected_image)
        self.mainCamLayout.replaceWidget(self.ui.sample_L,
                                         self.left_sample_image)
        self.mainCamLayout.replaceWidget(self.ui.sample_R,
                                         self.right_sample_image)
        self.ui.detected_L.deleteLater()
        self.ui.detected_R.deleteLater()
        self.ui.sample_L.deleteLater()
        self.ui.sample_R.deleteLater()

        self.sideCamLayout = self.ui.sectionSideResult.layout()
        self.sideCamLayout.replaceWidget(self.ui.lblSideResult,
                                         self.side_result_image)
        self.ui.lblSideResult.deleteLater()

    def showEvent(self, event):
        manager = DetectorConfig.instance().get_manager()
        main_idx, self.__main_cfg = manager.get_main_config()
        self.__capturing = True
        video_cameras = DetectorConfig.instance().get_video_cameras()
        configs = manager.get_configs()
        self.__main_cam = video_cameras[main_idx]
        for i, cfg in enumerate(configs):
            video_cameras[i].open(cfg["camera_uri"])
            # test only
            # video_cameras[i].open(Videos.instance().next())

        self.__sample_left = manager.get_sample_left()
        self.__sample_right = manager.get_sample_right()
        self.__pre_sample_left, self.__pre_sample_right = manager.preprocess_images(
            self.__main_cfg, self.__sample_left, self.__sample_right)

        self.image1.imshow(None)
        self.left_detected_image.imshow(None)
        self.right_detected_image.imshow(None)
        self.side_result_image.imshow(None)
        self.ui.inpResult.setHtml("<b>RESULT</b>")
        self.__set_btn_capture_text()
        self.__view_image_sample()
        self.__load_config()
        self.__camera_timer.start(20)
        return

    def hideEvent(self, event):
        self.__release()

    def closeEvent(self, event):
        self.__release()

    def __release(self):
        self.__camera_timer.stop()
        manager = DetectorConfig.instance().get_manager()
        video_cameras = DetectorConfig.instance().get_video_cameras()
        configs = manager.get_configs()
        for i, cfg in enumerate(configs):
            video_cameras[i].release()

    def validate_show(self):
        manager = DetectorConfig.instance().get_manager()
        _, main_cfg = manager.get_main_config()
        if main_cfg is None: return "No main configuration available"
        has_model = manager.get_model() is not None
        configs = manager.get_configs()
        for cfg in configs:
            if cfg["is_defect_enable"] and not has_model:
                return "Defect detection enabled but no model founded"
        if (manager.get_sample_left() is None
                or manager.get_sample_right() is None):
            return "Samples not found"
        return None

    # data binding
    def binding(self):
        self.return_home = self.ui.btnReturnHome.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        self.ui.cbbDisplayType.currentIndexChanged.connect(
            self.cbb_display_type_index_changed)
        self.__camera_timer.timeout.connect(self.camera_timer_timeout)
        self.__result_html_changed.connect(self.__handle_result_html_changed)

    def btn_capture_clicked(self):
        if self.__camera_timer.isActive():
            self.__capturing = False
            self.__camera_timer.stop()
        else:
            self.__capturing = True
            self.__camera_timer.start(20)
        self.__set_btn_capture_text()

    def cbb_display_type_index_changed(self):
        self.__last_display_type = self.ui.cbbDisplayType.currentText()

    def __set_btn_capture_text(self):
        self.ui.btnCapture.setText(
            "CAPTURE" if not self.__capturing else "STOP")

    def camera_timer_timeout(self):
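        # on every camera-timer tick: read a frame from the main camera, resize
        # it to the configured frame size and run the async pipeline via
        # trio.run (which blocks the UI thread until process_image completes)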
        manager = DetectorConfig.instance().get_manager()
        _, image = self.__main_cam.read()
        if image is None:
            # test only
            # self.__main_cam.open(Videos.instance().next())
            # _, image = self.__main_cam.read()
            self.image1.imshow(image)
            return
        frame_width, frame_height = self.__main_cfg[
            "frame_width"], self.__main_cfg["frame_height"]
        image = cv2.resize(image, (frame_width, frame_height))
        trio.run(self.process_image, image)

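    # Shows the stored left/right sample images, resized to the preview
    # label size.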
    def __view_image_sample(self):
        manager = DetectorConfig.instance().get_manager()
        label_w, label_h = self.left_detected_image.width(
        ), self.left_detected_image.height()
        dim = (label_w, label_h)
        m_left = cv2.resize(self.__sample_left, dim)
        m_right = cv2.resize(self.__sample_right, dim)
        self.left_sample_image.imshow(m_left)
        self.right_sample_image.imshow(m_right)

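    # Per-frame processing: displays the frame according to the selected
    # display type (Original / Detection / Contours), extracts boxes and
    # detects the checked pair. When a pair is found, the flipped-left and
    # right crops are previewed and the heavy __process_pair work is queued
    # on the global thread pool, tagged with the detection time.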
    async def process_image(self, image):
        label_w = self.image1.width()
        label_h = self.image1.height()
        dim = (label_w, label_h)
        manager = DetectorConfig.instance().get_manager()
        idx, main_cfg = manager.get_main_config()
        if self.__last_display_type == "Original":
            resized_image = cv2.resize(image, dim)
            self.image1.imshow(resized_image)
        boxes, proc = manager.extract_boxes(main_cfg, image)
        final_grouped, sizes, check_group_idx, pair, split_left, split_right, image_detect = manager.detect_groups_and_checked_pair(
            main_cfg, boxes, image)

        if self.__last_display_type == "Detection":
            unit = main_cfg["length_unit"]
            for idx, group in enumerate(final_grouped):
                for b_idx, b in enumerate(group):
                    c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
                    cur_size = sizes[idx][b_idx]
                    lH, lW = cur_size
                    helper.draw_boxes_and_sizes(image, idx, box, lH, lW, unit,
                                                tl, br)
            resized_image = cv2.resize(image, dim)
            self.image1.imshow(resized_image)
        elif self.__last_display_type == "Contours":
            resized_image = cv2.resize(proc, dim)
            self.image1.imshow(resized_image)

        if pair is not None:
            manager.check_group(check_group_idx, final_grouped)
            left, right = pair
            left, right = left[0], right[0]
            left = cv2.flip(left, 1)
            label_w = self.left_detected_image.width()
            label_h = self.left_detected_image.height()
            dim = (label_w, label_h)
            left_resized, right_resized = cv2.resize(left, dim), cv2.resize(
                right, dim)
            self.left_detected_image.imshow(left_resized)
            self.right_detected_image.imshow(right_resized)
            self.__last_detect_time = datetime.datetime.now()
            runnable = WorkerRunnable(self.__process_pair,
                                      self.__last_detect_time,
                                      check_group_idx,
                                      final_grouped,
                                      sizes,
                                      pair,
                                      parent=self)
            runnable.work_error.connect(lambda ex: print(ex))
            QThreadPool.globalInstance().start(runnable)

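    # Worker-thread pipeline for a detected pair: compares the checked group
    # size with the configured size, checks left/right asymmetry against the
    # samples and, when a difference is found, also runs color comparison and
    # defect detection; all non-main side cameras are activated in parallel
    # inside a trio nursery. The HTML result is built, output images are
    # saved to a dated folder and the event is submitted to the API. The UI
    # is only updated while `cur` still matches the latest detection time.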
    async def __process_pair(self, cur: datetime.datetime, check_group_idx,
                             final_grouped, sizes, pair):
        manager = DetectorConfig.instance().get_manager()
        manager.check_group(check_group_idx, final_grouped)
        check_size = sizes[check_group_idx]
        h_diff, w_diff = manager.compare_size(self.__main_cfg, check_size)

        # output
        left, right = pair
        left, right = left[0], right[0]
        left = cv2.flip(left, 1)
        pre_left, pre_right = manager.preprocess_images(
            self.__main_cfg, left, right)
        images = [left, right]
        final_save_images = [left.copy(), right.copy()]
        side_images = []
        # Similarity compare
        sim_cfg = self.__main_cfg["sim_cfg"]
        left_result, right_result = await manager.detect_asym(
            self.__main_cfg, pre_left, pre_right, self.__pre_sample_left,
            self.__pre_sample_right, None)
        is_asym_diff_left, avg_asym_left, avg_amp_left, recalc_left, res_list_l, amp_res_list_l = left_result
        is_asym_diff_right, avg_asym_right, avg_amp_right, recalc_right, res_list_r, amp_res_list_r = right_result
        has_asym = is_asym_diff_left or is_asym_diff_right
        has_color_checked, has_error_checked = False, False
        result_dict = {}
        async with trio.open_nursery() as nursery:
            if has_asym:
                if self.__main_cfg["is_color_enable"]:
                    has_color_checked = True
                    nursery.start_soon(manager.compare_colors, self.__main_cfg,
                                       pre_left, pre_right,
                                       self.__pre_sample_left,
                                       self.__pre_sample_right, True,
                                       (result_dict, "color_results"))

                if self.__main_cfg["is_defect_enable"]:
                    has_error_checked = True
                    nursery.start_soon(manager.detect_errors, self.__main_cfg,
                                       images, (result_dict, "err_results"))

            video_cameras = DetectorConfig.instance().get_video_cameras()
            configs = manager.get_configs()
            for idx, cfg in enumerate(configs):
                if cfg["is_main"]: continue
                cfg_name = cfg["name"]
                nursery.start_soon(self.__activate_side_cam, cfg,
                                   video_cameras[idx], manager,
                                   (result_dict, f"side_result_{cfg_name}"))

        side_results = []
        for key in result_dict.keys():
            if key.startswith("side_result_"):
                result = result_dict[key]
                if result is not None:
                    side_results.append(result)

        # result display
        if cur == self.__last_detect_time:
            self.side_result_image.imshow(None)
            self.__result_html_changed.emit("<b>RESULT</b>")
        defect_types = set()
        size_result = "<span style='color:green'>PASSED</span>"
        left_asym_result = "<span style='color:green'>PASSED</span>"
        right_asym_result = "<span style='color:green'>PASSED</span>"
        if h_diff or w_diff:
            size_result = "<span style='color:red'>FAILED: Different size</span>"
            defect_types.add(fqcs_constants.SIZE_MISMATCH)
        if is_asym_diff_left:
            left_asym_result = "<span style='color:red'>FAILED: Different from sample</span>"
            defect_types.add(fqcs_constants.SAMPLE_MISMATCH)
        if is_asym_diff_right:
            right_asym_result = "<span style='color:red'>FAILED: Different from sample</span>"
            defect_types.add(fqcs_constants.SAMPLE_MISMATCH)
        # output
        defect_result = "<span style='color:green'>NOT ANY</span>"
        defects = {}
        err_cfg = self.__main_cfg["err_cfg"]
        min_score = err_cfg["yolo_score_threshold"]
        classes_labels = err_cfg["classes"]
        if has_error_checked:
            boxes, scores, classes, valid_detections = result_dict[
                "err_results"]
            helper.draw_yolo_results(images,
                                     boxes,
                                     scores,
                                     classes,
                                     classes_labels,
                                     err_cfg["img_size"],
                                     min_score=min_score)
            self.__parse_defects_detection_result(images, scores, classes,
                                                  classes_labels, min_score,
                                                  defects)
            if cur == self.__last_detect_time:
                label_w = self.left_detected_image.width()
                label_h = self.left_detected_image.height()
                dim = (label_w, label_h)
                left_img = cv2.resize(images[0], dim)
                right_img = cv2.resize(images[1], dim)
                self.left_detected_image.imshow(left_img)
                self.right_detected_image.imshow(right_img)

        label_w = self.side_result_image.width()
        label_h = self.side_result_image.height()
        for res in side_results:
            side_save_images, side_images, side_boxes, side_scores, side_classes, side_valid_detections = res
            final_save_images.extend(side_save_images)
            self.__parse_defects_detection_result(side_images, side_scores,
                                                  side_classes, classes_labels,
                                                  min_score, defects)
            if cur == self.__last_detect_time:
                final_img = helpers.concat_images(side_images, label_w,
                                                  label_h)
                self.side_result_image.imshow(final_img)

        defect_result_text = []
        for key in defects.keys():
            defect_types.add(key)
            d_count = defects[key]
            defect_result_text.append(f"{key}: {d_count}")
        if len(defects) > 0:
            defect_result_text = ", ".join(defect_result_text)
            defect_result_text = f"<span style='color:red'>{defect_result_text}</span>"
            defect_result = defect_result_text

        # output
        if has_color_checked:
            left_color_result = "<span style='color:green'>PASSED</span>"
            right_color_result = "<span style='color:green'>PASSED</span>"
            left_c_results = result_dict["color_results"][0]
            right_c_results = result_dict["color_results"][1]
            if left_c_results[3]:
                left_color_result = "<span style='color:red'>FAILED: Different color</span>"
                defect_types.add(fqcs_constants.COLOR_MISMATCH)
            if right_c_results[3]:
                right_color_result = "<span style='color:red'>FAILED: Different color</span>"
                defect_types.add(fqcs_constants.COLOR_MISMATCH)

        cur_date_str = cur.strftime(ISO_DATE_FORMAT)
        result_text = f"<b>RESULT</b><br/>" + f"<b>Time</b>: {cur_date_str}<br/><hr/>"
        result_text += f"<b>Size</b>: {size_result}<br/><hr/>"
        result_text += f"<b>Similarity of left</b>: {left_asym_result}<br/>"
        result_text += f"<b>Similarity of right</b>: {right_asym_result}<br/><hr/>"
        if has_color_checked:
            result_text += f"<b>Color of left</b>: {left_color_result}<br/>"
            result_text += f"<b>Color of right</b>: {right_color_result}<br/><hr/>"
        result_text += f"<b>Defects</b>: {defect_result}<br/>"
        # test only
        # result_text += f"{defect_types}"

        if cur == self.__last_detect_time:
            self.__result_html_changed.emit(result_text)

        # save images
        folder = cur.strftime(FOLDER_DATE_FORMAT)
        os.makedirs(os.path.join(self.__storage_path, folder), exist_ok=True)

        saved_paths = []
        for img in final_save_images:
            img_name = str(uuid.uuid4()) + ".jpg"
            rel_path = os.path.join(folder, img_name)
            abs_path = os.path.join(self.__storage_path, rel_path)
            cv2.imwrite(abs_path, img)
            saved_paths.append(rel_path)

        # send to api
        access_token = AuthInfo.instance().get_token_info()["access_token"]
        headers = fqcs_api.get_common_headers(auth_token=access_token)
        is_success, resp = fqcs_api.submit_event(self.__api_url, defect_types,
                                                 saved_paths[0],
                                                 saved_paths[1],
                                                 saved_paths[2:], headers)
        print(is_success, resp)
        return

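    # Grabs a frame from the given side camera, extracts boxes with its
    # config, detects the pair crops and runs defect detection on them using
    # the main config's error settings; the drawn results are returned
    # through helper.return_result.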
    async def __activate_side_cam(self, cfg, cam, manager, result_info):
        _, image = cam.read()
        # test only
        # rd = np.random.randint(0, 100)
        # print("Rand", rd)
        # if rd > 75:
        #     _, image = self.__main_cam.read()
        #     frame_width, frame_height = cfg["frame_width"], cfg["frame_height"]
        #     resized_image = cv2.resize(image, (frame_width, frame_height))
        #     boxes, proc = manager.extract_boxes(cfg, resized_image)
        #     image_detect = resized_image.copy()
        #     pair, image_detect, boxes = manager.detect_pair_side_cam(
        #         cfg, boxes, image_detect)
        # else:
        #     pair = [(cv2.imread("./resources/test_data/side_demo.jpg"), None)]

        frame_width, frame_height = cfg["frame_width"], cfg["frame_height"]
        resized_image = cv2.resize(image, (frame_width, frame_height))
        boxes, proc = manager.extract_boxes(cfg, resized_image)
        image_detect = resized_image.copy()
        pair, image_detect, boxes = manager.detect_pair_side_cam(
            cfg, boxes, image_detect)
        result = None
        if (pair is not None and len(pair) > 0):
            images = [item[0] for item in pair]
            save_images = [img.copy() for img in images]
            boxes, scores, classes, valid_detections = await manager.detect_errors(
                self.__main_cfg, images, None)
            err_cfg = self.__main_cfg["err_cfg"]
            helper.draw_yolo_results(images,
                                     boxes,
                                     scores,
                                     classes,
                                     err_cfg["classes"],
                                     err_cfg["img_size"],
                                     min_score=err_cfg["yolo_score_threshold"])
            result = (save_images, images, boxes, scores, classes,
                      valid_detections)
        return helper.return_result(result, result_info)

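    # Rescales the normalized float images back to uint8 in place and counts,
    # per class label, every detection whose score exceeds min_score into the
    # defects dict.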
    def __parse_defects_detection_result(self, images, scores, classes,
                                         classes_labels, min_score, defects):
        for i in range(len(images)):
            images[i] *= 255.
            images[i] = np.asarray(images[i], dtype=np.uint8)
            iscores = scores[i]
            iclasses = classes[i].astype(int)
            for score, cl in zip(iscores.tolist(), iclasses.tolist()):
                if score > min_score:
                    defect = classes_labels[cl]
                    if defect not in defects:
                        defects[defect] = 0
                    defects[defect] += 1

    def __handle_result_html_changed(self, html):
        self.ui.inpResult.setHtml(html)

    def __load_config(self):
        self.__last_display_type = "Original"
        self.ui.cbbDisplayType.setCurrentIndex(0)
class SideErrorDetectScreen(QWidget):
    backscreen: Signal
    nextscreen: Signal
    captured = Signal()

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.ui = Ui_SideErrorDetectScreen()
        self.__main_cfg = None
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.image2 = ImageWidget()
        self.image3 = ImageWidget()
        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.imageLayout.replaceWidget(self.ui.screen2, self.image2)
        self.imageLayout.replaceWidget(self.ui.screen4, self.image3)
        self.ui.screen1.deleteLater()
        self.ui.screen2.deleteLater()
        self.ui.screen4.deleteLater()

        self.ui.cbbWidth.setPlaceholderText("Width")
        self.ui.cbbHeight.setPlaceholderText("Height")
        self.ui.cbbWidth.setCurrentIndex(-1)
        self.ui.cbbHeight.setCurrentIndex(-1)
        frame_resize_values = [str(32 * i) for i in range(1, 20)]

        self.ui.cbbHeight.clear()
        for value in frame_resize_values:
            self.ui.cbbHeight.addItem(value, userData=int(value))

        self.ui.cbbWidth.clear()
        for value in frame_resize_values:
            self.ui.cbbWidth.addItem(value, userData=int(value))

    def validate_show(self):
        _, main_cfg = DetectorConfig.instance().get_manager().get_main_config()
        if main_cfg is None:
            return "Main configuration not found"
        return None

    def showEvent(self, event):
        _, self.__main_cfg = DetectorConfig.instance().get_manager(
        ).get_main_config()
        _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
        self.__set_btn_capture_text()
        self.__load_config()

    def __load_config(self):
        err_cfg = self.__main_cfg["err_cfg"]
        img_size = err_cfg["img_size"]
        inp_shape = err_cfg["inp_shape"]
        yolo_iou_threshold = err_cfg["yolo_iou_threshold"]
        yolo_max_boxes = err_cfg["yolo_max_boxes"]
        yolo_score_threshold = err_cfg["yolo_score_threshold"]
        weights = err_cfg["weights"]
        classes = err_cfg["classes"]
        num_classes = err_cfg["num_classes"]

        width = img_size[0]
        height = img_size[1]

        self.ui.cbbWidth.setCurrentText(str(width))
        self.ui.cbbHeight.setCurrentText(str(height))

        self.ui.inpModelChoice.setText(weights)
        self.ui.sldIoUThresh.setValue(int(yolo_iou_threshold * 100))
        self.ui.groupIoUThresh.setTitle(f"IoU threshold: {yolo_iou_threshold}")
        self.ui.sldScoreThresh.setValue(int(yolo_score_threshold * 100))
        self.ui.groupScoreThresh.setTitle(
            f"Score threshold: {yolo_score_threshold}")
        self.ui.inpMaxInstances.setValue(yolo_max_boxes)
        self.ui.inpClasses.setText(", ".join(classes))

    # binding
    def binding(self):
        self.backscreen = self.ui.btnBack.clicked
        self.nextscreen = self.ui.btnFinish.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        self.ui.btnChoosePicture.clicked.connect(
            self.btn_choose_picture_clicked)
        self.ui.btnDetect.clicked.connect(self.btn_detect_clicked)

    # handlers
    def btn_capture_clicked(self):
        self.captured.emit()
        self.__set_btn_capture_text()

    def __set_btn_capture_text(self):
        timer_active = DetectorConfig.instance().get_timer().isActive()
        self.ui.btnCapture.setText("CAPTURE" if not timer_active else "STOP")
        self.ui.btnChoosePicture.setEnabled(not timer_active)
        self.ui.btnDetect.setEnabled(not timer_active)

    def btn_detect_clicked(self):
        manager = DetectorConfig.instance().get_manager()
        if self.__last_pair is not None and manager.get_model() is not None:
            if len(self.__last_pair) == 0:
                helpers.show_message("No pair detected")
                return
            images = [img.copy() for img in self.__last_pair]
            runnable = WorkerRunnable(self.__detect_error, images, parent=self)
            runnable.work_error.connect(lambda ex: print(ex))
            QThreadPool.globalInstance().start(runnable)

    @asyncSlot()
    async def btn_choose_picture_clicked(self):
        url, _ = helpers.file_chooser_open_file(
            self, f_filter="Images (*.jpg *.png *.bmp)")
        if url.isEmpty(): return
        file_name = url.toLocalFile()
        images = [cv2.imread(file_name)]
        await self.__detect_error_on_picture(images)

    def view_cam(self, image):
        # read image in BGR format
        self.__last_image = image
        label_w = self.image1.width()
        label_h = self.image1.height()
        dim = (label_w, label_h)
        if image is None:
            self.image1.imshow(image)
            self.image2.imshow(image)
            self.image3.imshow(image)
            return
        orig = cv2.resize(image, dim)
        self.image1.imshow(orig)
        proc, self.__last_pair = self.__process_pair(image)
        proc = cv2.resize(proc, dim)
        self.image2.imshow(proc)

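    # Runs defect detection on the captured pair crops, draws the YOLO
    # results, converts the images back to uint8 and shows them concatenated
    # in image3.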
    async def __detect_error(self, images):
        manager = DetectorConfig.instance().get_manager()
        err_task = manager.detect_errors(self.__main_cfg, images, None)
        boxes, scores, classes, valid_detections = await err_task
        err_cfg = self.__main_cfg["err_cfg"]
        helper.draw_yolo_results(images,
                                 boxes,
                                 scores,
                                 classes,
                                 err_cfg["classes"],
                                 err_cfg["img_size"],
                                 min_score=err_cfg["yolo_score_threshold"])

        label_w = self.image3.width()
        label_h = self.image3.height()
        for idx, img in enumerate(images):
            images[idx] *= 255.
            images[idx] = np.asarray(images[idx], np.uint8)
        final_img = helpers.concat_images(images, label_w, label_h)
        self.image3.imshow(final_img)

    async def __detect_error_on_picture(self, images):
        # same processing as __detect_error; kept as a separate entry point
        # for the file-chooser flow
        await self.__detect_error(images)

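    # Extracts boxes with the current side-camera config, draws every box on
    # the frame and returns the frame together with the detected pair crops
    # (or None when no pair was found).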
    def __process_pair(self, image):
        manager = DetectorConfig.instance().get_manager()
        boxes, proc = manager.extract_boxes(self.__current_cfg, image)
        image_detect = image.copy()
        pair, image_detect, boxes = manager.detect_pair_side_cam(
            self.__current_cfg, boxes, image_detect)

        # output
        for b in boxes:
            c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
            helper.draw_boxes(image, box)

        if pair is not None:
            images = [item[0] for item in pair]
            return image, images

        return image, None

class DetectionConfigScreen(QWidget):
    BRIGHTNESS_STEP = 0.1
    CONTRAST_STEP = 5
    THRESHOLD1_STEP = 5
    THRESHOLD2_STEP = 5
    __last_selected_row = -1
    backscreen: Signal
    nextscreen: Signal
    captured = Signal()
    camera_changed = Signal(object)

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.__cam_array = helpers.get_all_camera_index()
        self.__current_cfg = None
        self.ui = Ui_DetectionConfigScreen()
        self.ui.setupUi(self)
        self.build()
        self.binding()

    def build(self):
        self.image1 = ImageWidget()
        self.image2 = ImageWidget()
        self.image3 = ImageWidget()
        self.imageLayout = self.ui.screen1.parentWidget().layout()
        self.imageLayout.replaceWidget(self.ui.screen1, self.image1)
        self.imageLayout.replaceWidget(self.ui.screen2, self.image2)
        self.imageLayout.replaceWidget(self.ui.screen3, self.image3)
        self.ui.screen1.deleteLater()
        self.ui.screen2.deleteLater()
        self.ui.screen3.deleteLater()

        self.ui.cbbWidth.setPlaceholderText("Width")
        self.ui.cbbHeight.setPlaceholderText("Height")
        self.ui.cbbCamera.setPlaceholderText("Choose Cam")

        self.ui.cbbCamera.clear()
        for camera in self.__cam_array:
            self.ui.cbbCamera.addItem("Camera " + str(camera), userData=camera)

        frame_resize_values = [
            "160", "240", "320", "400", "480", "560", "640", "720", "800",
            "880", "960", "1040", "1120", "1200", "1280"
        ]

        self.ui.cbbHeight.clear()
        for value in frame_resize_values:
            self.ui.cbbHeight.addItem(value, userData=int(value))

        self.ui.cbbWidth.clear()
        for value in frame_resize_values:
            self.ui.cbbWidth.addItem(value, userData=int(value))

        self.ui.cbbMethod.clear()
        self.ui.cbbMethod.addItem("Edge", userData="edge")
        self.ui.cbbMethod.addItem("Threshold", userData="thresh")
        self.ui.cbbMethod.addItem("Range", userData="range")

        table = self.ui.tblCameraConfig
        table.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
        table.horizontalHeader().setSectionResizeMode(
            1, QHeaderView.ResizeToContents)
        table.setEditTriggers(QAbstractItemView.NoEditTriggers)
        table.setContextMenuPolicy(Qt.CustomContextMenu)
        self.__reload_table()

        # create context menu
        self.tblPopMenu = QMenu(self)
        self.tblPopActionEditName = QAction('Edit name', self)
        self.tblPopActionRemove = QAction('Remove', self)
        self.tblPopMenu.addAction(self.tblPopActionEditName)
        self.tblPopMenu.addAction(self.tblPopActionRemove)

    def showEvent(self, event):
        _, self.__current_cfg = DetectorConfig.instance().get_current_cfg()
        self.__set_btn_capture_text()
        if self.__current_cfg is None:
            self.__show_config_section(False)
            return
        self.__load_config()

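    # Rebuilds the camera table from the manager's configs (one row per
    # camera with its role), then clears the selection and blanks all three
    # preview widgets.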
    def __reload_table(self):
        table = self.ui.tblCameraConfig
        table.clearContents()
        table.setRowCount(0)
        manager = DetectorConfig.instance().get_manager()
        cfgs = manager.get_configs()
        for cfg in cfgs:
            camera_name = cfg["name"]
            role = "Main camera" if cfg["is_main"] else "Side camera"
            self.__add_new_row(table, camera_name, role)
        table.clearSelection()
        table.itemSelectionChanged.emit()
        self.__last_selected_row = -1
        self.image1.imshow(None)
        self.image2.imshow(None)
        self.image3.imshow(None)

    def __reload_roles(self):
        table = self.ui.tblCameraConfig
        manager = DetectorConfig.instance().get_manager()
        cfgs = manager.get_configs()
        for row, cfg in enumerate(cfgs):
            item = table.item(row, 1)
            role = "Main camera" if cfg["is_main"] else "Side camera"
            item.setData(Qt.ItemDataRole.DisplayRole, role)

    #BINDING
    def binding(self):
        self.backscreen = self.ui.btnBack.clicked
        self.nextscreen = self.ui.btnNext.clicked
        self.ui.btnCapture.clicked.connect(self.btn_capture_clicked)
        DetectorConfig.instance().manager_changed.connect(self.manager_changed)
        self.ui.sldBrightness.valueChanged.connect(
            self.sld_brightness_value_change)
        self.ui.sldContrast.valueChanged.connect(
            self.sld_contrast_value_change)
        self.ui.sldThreshold1.valueChanged.connect(
            self.sld_threshold1_value_change)
        self.ui.sldThreshold2.valueChanged.connect(
            self.sld_threshold2_value_change)
        self.ui.sldBlur.valueChanged.connect(self.sld_blur_value_change)
        self.ui.sldDilate.valueChanged.connect(self.dilate_value_change)
        self.ui.sldErode.valueChanged.connect(self.sld_erode_value_change)
        self.ui.sldBkgThresh.valueChanged.connect(self.sld_bkg_value_change)
        self.ui.sldLightAdj.valueChanged.connect(
            self.sld_light_adj_value_change)
        self.ui.sldLightAdjRange.valueChanged.connect(
            self.sld_light_adj_range_value_change)
        self.ui.cbbCamera.currentIndexChanged.connect(self.cbbCamera_changed)
        self.ui.btnColorFrom.clicked.connect(self.btn_color_from_clicked)
        self.ui.btnColorTo.clicked.connect(self.btn_color_to_clicked)
        self.ui.cbbHeight.currentIndexChanged.connect(self.cbbHeight_changed)
        self.ui.cbbWidth.currentIndexChanged.connect(self.cbbWidth_changed)
        self.ui.cbbMethod.currentIndexChanged.connect(self.cbbMethod_changed)
        self.ui.ckbInvertThresh.stateChanged.connect(
            self.chk_thresh_invert_state_change)
        self.ui.ckbInvertRange.stateChanged.connect(
            self.chk_range_invert_state_change)
        self.ui.tblCameraConfig.itemSelectionChanged.connect(
            self.tbl_camera_item_selection_changed)
        self.ui.tblCameraConfig.customContextMenuRequested.connect(
            self.table_context_menu)
        self.tblPopActionEditName.triggered.connect(
            self.table_action_edit_name_triggered)
        self.tblPopActionRemove.triggered.connect(
            self.table_action_remove_triggered)
        self.ui.btnAdd.clicked.connect(self.btn_add_clicked)

    #HANDLERS
    def btn_capture_clicked(self):
        self.captured.emit()
        self.__set_btn_capture_text()

    def __set_btn_capture_text(self):
        timer_active = DetectorConfig.instance().get_timer().isActive()
        self.ui.btnCapture.setText("CAPTURE" if not timer_active else "STOP")

    def manager_changed(self):
        self.__reload_table()

    def table_context_menu(self, point):
        self.tblPopMenu.exec_(self.ui.tblCameraConfig.mapToGlobal(point))

    def table_action_edit_name_triggered(self):
        table = self.ui.tblCameraConfig
        chosen_row = table.currentRow()
        detector_cfg = DetectorConfig.instance()
        camera_name = table.item(chosen_row, 0).text()
        dialog = DialogEditName(camera_name, parent=self)
        choice = dialog.exec_()
        if choice != QDialog.Accepted:
            return
        new_name = dialog.get_inp_edit_name().strip()
        err_text = detector_cfg.validate_config_name(new_name)
        if err_text is not None:
            helpers.show_message(err_text)
            return
        _, cfg = detector_cfg.get_current_cfg()
        cfg["name"] = new_name
        self.__reload_table()

    def table_action_remove_triggered(self, point):
        table = self.ui.tblCameraConfig
        chosen_row = table.currentRow()
        detector_cfg = DetectorConfig.instance()
        camera_name = table.item(chosen_row, 0).text()
        _, cfg = detector_cfg.get_manager().get_config_by_name(camera_name)
        detector_cfg.remove_config(cfg)
        self.__reload_table()

    #edge detection method
    def chk_thresh_invert_state_change(self):
        if self.ui.ckbInvertThresh.isChecked():
            self.__current_cfg["d_cfg"]["thresh_inv"] = True
        else:
            self.__current_cfg["d_cfg"]["thresh_inv"] = False

    def chk_range_invert_state_change(self):
        if self.ui.ckbInvertRange.isChecked():
            self.__current_cfg["d_cfg"]["color_inv"] = True
        else:
            self.__current_cfg["d_cfg"]["color_inv"] = False

    def sld_brightness_value_change(self):
        value = round(self.ui.sldBrightness.value() * self.BRIGHTNESS_STEP, 1)
        self.__current_cfg["d_cfg"]["alpha"] = value
        self.ui.grpboxSldBrightness.setTitle("Brightness: " + str(value))

    def sld_contrast_value_change(self):
        value = self.ui.sldContrast.value() * self.CONTRAST_STEP
        self.__current_cfg["d_cfg"]["beta"] = value
        self.ui.grbboxSldContrast.setTitle("Contrast: " + str(value))

    def sld_threshold1_value_change(self):
        value = self.ui.sldThreshold1.value() * self.THRESHOLD1_STEP
        self.__current_cfg["d_cfg"]["threshold1"] = value
        self.ui.grbboxSldThreshold.setTitle("Threshold 1: " + str(value))

    def sld_threshold2_value_change(self):
        value = self.ui.sldThreshold2.value() * self.THRESHOLD2_STEP
        self.__current_cfg["d_cfg"]["threshold2"] = value
        self.ui.grbboxSldThreshold2.setTitle("Threshold 2: " + str(value))

    def sld_blur_value_change(self):
        value = self.ui.sldBlur.value()
        self.__current_cfg["d_cfg"]["kernel"] = (2 * value + 1, 2 * value + 1)
        self.ui.grpboxSldBlur.setTitle("Blur: " + str(value))

    def dilate_value_change(self):
        value = self.ui.sldDilate.value()
        self.__current_cfg["d_cfg"]["d_kernel"] = np.ones((value, value))
        self.ui.grbboxSldDilate.setTitle("Dilate: " + str(value))

    def sld_erode_value_change(self):
        value = self.ui.sldErode.value()
        self.__current_cfg["d_cfg"]["e_kernel"] = np.ones((value, value))
        self.ui.grbboxSldErode.setTitle("Erode: " + str(value))

    #threshold detection method
    def sld_bkg_value_change(self):
        value = self.ui.sldBkgThresh.value()
        self.__current_cfg["d_cfg"]["bg_thresh"] = value
        self.ui.grpboxBkgThreshold.setTitle("Background Threshold: " +
                                            str(value))

    def sld_light_adj_value_change(self):
        value = self.ui.sldLightAdj.value()
        self.__current_cfg["d_cfg"]["light_adj_thresh"] = value
        self.ui.grpboxLightAdj.setTitle("Light Adjustment: " + str(value))

    #range detection method
    def sld_light_adj_range_value_change(self):
        value = self.ui.sldLightAdjRange.value()
        self.__current_cfg["d_cfg"]["light_adj_thresh"] = value
        self.ui.grpboxLightAdjRange.setTitle(f"Light Adjustment: {value}")

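    # The stored hue is half of Qt's 0-359 hue (OpenCV-style 0-179), so it is
    # doubled (clamped to 359) for the color dialog and halved again before
    # saving.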
    def btn_color_from_clicked(self):
        # get initial color
        hsv = self.__current_cfg["d_cfg"]["cr_from"]
        h = 359 if (int(hsv[0] * 2) > 359) else int(hsv[0] * 2)
        s = int(hsv[1])
        v = int(hsv[2])
        init_hsv = QColor.fromHsv(h, s, v, a=255)

        color = QColorDialog.getColor(parent=self, initial=init_hsv)
        if color.isValid():
            hsv = color.getHsv()
            hsv = (hsv[0] / 2, hsv[1], hsv[2])
            self.__current_cfg["d_cfg"]["cr_from"] = hsv
            color_hex = color.name()
            self.ui.btnColorFrom.setStyleSheet("background-color: " +
                                               color_hex)

    def btn_color_to_clicked(self):
        #get initial color
        hsv = self.__current_cfg["d_cfg"]["cr_to"]
        h = 359 if (int(hsv[0] * 2) > 359) else int(hsv[0] * 2)
        s = int(hsv[1])
        v = int(hsv[2])
        init_hsv = QColor.fromHsv(h, s, v, a=255)

        color = QColorDialog.getColor(parent=self, initial=init_hsv)
        if color.isValid():
            hsv = color.getHsv()
            hsv = (hsv[0] / 2, hsv[1], hsv[2])
            self.__current_cfg["d_cfg"]["cr_to"] = hsv
            color_hex = color.name()
            self.ui.btnColorTo.setStyleSheet("background-color: " + color_hex)

    def btn_add_clicked(self):
        detector_cfg = DetectorConfig.instance()
        manager = detector_cfg.get_manager()
        table = self.ui.tblCameraConfig
        camera_name = self.ui.txtNewCamera.text().strip()
        err_text = detector_cfg.validate_config_name(camera_name)
        if err_text is not None:
            helpers.show_message(err_text)
            return
        new_cfg = detector.default_detector_config()
        new_cfg["name"] = camera_name
        detector_cfg.add_config(new_cfg)
        self.__add_new_row(table, camera_name, "Side camera")

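    # Switches the active config to the newly selected camera row and reloads
    # its controls; when the selection is cleared, the config section is
    # hidden and camera_changed(-1) is emitted.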
    def tbl_camera_item_selection_changed(self):
        table = self.ui.tblCameraConfig
        chosen_row = table.currentRow()
        if chosen_row == self.__last_selected_row: return
        self.__last_selected_row = chosen_row
        detector_cfg = DetectorConfig.instance()
        if chosen_row != -1:
            camera_name = table.item(chosen_row, 0).text()
            detector_cfg.set_current_cfg_name(camera_name)
            _, self.__current_cfg = detector_cfg.get_current_cfg()
            self.__show_config_section(True)
            self.__load_config()
        else:
            detector_cfg.set_current_cfg_name(None)
            self.__show_config_section(False)
            self.camera_changed.emit(-1)

    def cbbCamera_changed(self):
        index = self.ui.cbbCamera.currentData()
        self.__current_cfg["camera_uri"] = index
        self.camera_changed.emit(index)

    def cbbMethod_changed(self, index: int):
        method = self.ui.cbbMethod.currentData()
        self.__current_cfg["detect_method"] = method
        self.ui.stackContainerMid.setCurrentIndex(index)

    def cbbHeight_changed(self):
        value = self.ui.cbbHeight.currentData()
        self.__current_cfg["frame_height"] = value

    def cbbWidth_changed(self):
        value = self.ui.cbbWidth.currentData()
        self.__current_cfg["frame_width"] = value

    # view camera
    def view_cam(self, image):
        # read image in BGR format
        label_w = self.image1.width()
        label_h = self.image1.height()
        dim = (label_w, label_h)
        if image is None:
            self.image1.imshow(image)
            self.image2.imshow(image)
            self.image3.imshow(image)
            return
        contour, proc = self.__process_contours(image.copy())
        img_resized = cv2.resize(image, dim)
        contour_resized = cv2.resize(contour, dim)
        proc_resized = cv2.resize(proc, dim)
        self.image1.imshow(img_resized)
        self.image2.imshow(contour_resized)
        self.image3.imshow(proc_resized)

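    # Extracts boxes with the current config, draws each box on the frame and
    # returns the annotated frame plus the processed image.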
    def __process_contours(self, image):
        manager = DetectorConfig.instance().get_manager()
        boxes, proc = manager.extract_boxes(self.__current_cfg, image)
        for b in boxes:
            c, rect, dimA, dimB, box, tl, tr, br, bl, minx, maxx, cenx = b
            helper.draw_boxes(image, box)
        return image, proc

    def __show_config_section(self, shown):
        if shown:
            helpers.show_all_children(self.ui.containerMidRange)
            helpers.show_all_children(self.ui.containerMidEdge)
            helpers.show_all_children(self.ui.containerMidThresh)
            helpers.show_all_children(self.ui.containerLeft)
            self.ui.btnCapture.show()
            self.ui.btnNext.show()
        else:
            helpers.hide_all_children(self.ui.containerMidRange)
            helpers.hide_all_children(self.ui.containerMidEdge)
            helpers.hide_all_children(self.ui.containerMidThresh)
            helpers.hide_all_children(self.ui.containerLeft)
            self.ui.btnCapture.hide()
            self.ui.btnNext.hide()

    #load init configs
    def __load_config(self):
        #edge
        brightness = self.__current_cfg["d_cfg"]["alpha"]
        contrast = self.__current_cfg["d_cfg"]["beta"]
        thresh1 = self.__current_cfg["d_cfg"]["threshold1"]
        thresh2 = self.__current_cfg["d_cfg"]["threshold2"]
        blur = self.__current_cfg["d_cfg"]["kernel"][0]
        dilate = self.__current_cfg["d_cfg"]["d_kernel"].shape[1]
        erode = self.__current_cfg["d_cfg"]["e_kernel"].shape[
            1] if self.__current_cfg["d_cfg"]["e_kernel"] is not None else 0
        #threshold
        bkg = self.__current_cfg["d_cfg"]["bg_thresh"]
        light_thresh = self.__current_cfg["d_cfg"]["light_adj_thresh"]
        #range
        light_range = self.__current_cfg["d_cfg"]["light_adj_thresh"]
        color_to = self.__current_cfg["d_cfg"]["cr_to"]
        color_from = self.__current_cfg["d_cfg"]["cr_from"]
        #main controls
        method_index = self.ui.cbbMethod.findData(
            self.__current_cfg["detect_method"])
        height_index = self.ui.cbbHeight.findData(
            self.__current_cfg["frame_height"])
        width_index = self.ui.cbbWidth.findData(
            self.__current_cfg["frame_width"])

        self.ui.sldBrightness.setValue(brightness / self.BRIGHTNESS_STEP)
        self.ui.sldContrast.setValue(contrast / self.CONTRAST_STEP)
        self.ui.sldThreshold1.setValue(thresh1 / self.THRESHOLD1_STEP)
        self.ui.sldThreshold2.setValue(thresh2 / self.THRESHOLD2_STEP)
        self.ui.sldBlur.setValue((blur - 1) / 2)
        self.ui.sldDilate.setValue(dilate)
        self.ui.sldErode.setValue(erode)

        self.ui.sldBkgThresh.setValue(bkg)
        self.ui.sldLightAdj.setValue(light_thresh)

        self.ui.sldLightAdjRange.setValue(light_range)
        hsv_from = self.__current_cfg["d_cfg"]["cr_from"]
        init_hsv_from = QColor.fromHsv(hsv_from[0] * 2, hsv_from[1],
                                       hsv_from[2], 255)
        self.ui.btnColorFrom.setStyleSheet("background-color: " +
                                           init_hsv_from.name())
        hsv_to = self.__current_cfg["d_cfg"]["cr_to"]
        init_hsv_to = QColor.fromHsv(hsv_to[0] * 2, hsv_to[1], hsv_to[2], 255)
        self.ui.btnColorTo.setStyleSheet("background-color: " +
                                         init_hsv_to.name())

        self.ui.cbbMethod.setCurrentIndex(method_index)
        self.ui.cbbHeight.setCurrentIndex(height_index)
        self.ui.cbbWidth.setCurrentIndex(width_index)

        camera_uri = self.__current_cfg["camera_uri"]
        if camera_uri is not None and camera_uri < self.ui.cbbCamera.count():
            self.ui.cbbCamera.setCurrentIndex(camera_uri)
            self.camera_changed.emit(camera_uri)
        else:
            self.ui.cbbCamera.setCurrentIndex(-1)
        self.__reload_roles()

    def __add_new_row(self, table, camera_name, role):
        current_row = table.rowCount()
        table.insertRow(current_row)
        name_item = QTableWidgetItem(camera_name)
        role_item = QTableWidgetItem(role)
        table.setItem(current_row, 0, name_item)
        table.setItem(current_row, 1, role_item)