Example no. 1
import glob
import os
import sys

# required_opencv_modules (the list of OpenCV module names to link against)
# is expected to be defined elsewhere in this module.

def get_conda_opencv_info(sys_name):
    try:
        import cv2
    except ImportError as e:
        print(str(e))
        print(
            'Please install opencv for python in your current Anaconda environment via the following:\n'
            'conda install -c conda-forge opencv=3.4.7')
        sys.exit(-1)

    assert (cv2.getVersionMajor() == 3)  # currently we do not support OpenCV 4

    if sys_name == 'Linux':
        opencv_lib_dir = os.path.abspath(
            os.path.join(os.path.dirname(cv2.__file__), *(['..'] * 2)))
        available_opencv_libs = glob.glob(
            os.path.join(opencv_lib_dir, 'libopencv*.so'))
    elif sys_name == 'Darwin':
        opencv_lib_dir = os.path.abspath(
            os.path.join(os.path.dirname(cv2.__file__), *(['..'] * 2)))
        available_opencv_libs = glob.glob(
            os.path.join(opencv_lib_dir, 'libopencv*.dylib'))
    else:  #sys_name=='Windows'
        opencv_lib_dir = os.path.abspath(
            os.path.join(os.path.dirname(cv2.__file__), *['..'] * 2, 'Library',
                         'lib'))
        available_opencv_libs = glob.glob(
            os.path.join(opencv_lib_dir, 'opencv*.lib'))

    opencv_inc_dir = os.path.abspath(
        os.path.join(opencv_lib_dir, '..', 'include'))
    assert (os.path.exists(opencv_lib_dir))
    assert (os.path.exists(opencv_inc_dir))
    assert (os.path.exists(os.path.join(opencv_inc_dir, 'opencv2')))

    assert (any(available_opencv_libs))
    available_opencv_libs = [
        os.path.splitext(os.path.basename(it))[0]
        for it in available_opencv_libs
    ]
    # strip the literal 'lib' prefix; lstrip('lib') would strip any leading
    # 'l', 'i', or 'b' characters, not the prefix as a whole
    available_opencv_libs = [
        it[3:] if it.startswith('lib') else it
        for it in available_opencv_libs
    ]

    if sys_name == 'Windows':
        opencv_libs = [
            lib for lib in available_opencv_libs
            if any([lib.startswith(it) for it in required_opencv_modules])
        ]
    else:
        opencv_libs = [
            lib for lib in required_opencv_modules
            if lib in available_opencv_libs
        ]
    return opencv_lib_dir, opencv_inc_dir, opencv_libs
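A minimal usage sketch (not part of the original example), assuming the returned tuple feeds a setuptools Extension for a native build; the extension name and source file are hypothetical, and platform.system() yields the same 'Linux'/'Darwin'/'Windows' strings the function branches on.

import platform
from setuptools import Extension

lib_dir, inc_dir, libs = get_conda_opencv_info(platform.system())
ext = Extension('cv_bindings',                 # hypothetical module name
                sources=['cv_bindings.cpp'],   # hypothetical source file
                include_dirs=[inc_dir],
                library_dirs=[lib_dir],
                libraries=libs)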
Example no. 2
    def __init__(self, inside, outside, factor=1.0, width=1920, height=1080):
        super().__init__()
        self.inside = self._scale(inside, factor)
        self.outside = self._scale(outside, factor)
        self.width = int(width * factor)
        self.height = int(height * factor)

        opencv_major = cv2.getVersionMajor()
        if opencv_major == 3:
            self._cv_find_contours = _cv3_find_contours
        elif opencv_major == 4:
            self._cv_find_contours = _cv4_find_contours
        else:
            raise ValueError(
                'OpenCV {} is not supported'.format(opencv_major))

        self.inside_contours = self._find_contours(self.inside, factor)
        self.outside_contours = self._find_contours(self.outside, factor)
        self.inside_mask = self._create_mask(self.inside, factor)
        self.outside_mask = self._create_mask(self.outside, factor)
        self.buffer_mask = np.logical_not(
            np.logical_or(self.inside_mask, self.outside_mask))
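The module-level helpers _cv3_find_contours and _cv4_find_contours are not included in this snippet; a plausible sketch, inferred from the return-signature difference the other examples branch on (the retrieval mode and approximation flags here are assumptions):

import cv2

def _cv3_find_contours(mask):
    # OpenCV 3 returns (image, contours, hierarchy)
    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    return contours

def _cv4_find_contours(mask):
    # OpenCV 2 and 4 return (contours, hierarchy)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    return contours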
Example no. 3
def check_find(img, threshold, mark_thresh, check_type):
    '''
    Returns an image labelled with all relevant checks.
    '''

    if cv2.getVersionMajor() in [2, 4]:
        contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

    font = cv2.FONT_HERSHEY_TRIPLEX
    document_height, document_width = img.shape[0], img.shape[1]
    mark_thresh = float(mark_thresh.strip('%')) / 100.0

    for cnt in contours:
        approx = cv2.approxPolyDP(cnt, 0.1 * cv2.arcLength(cnt, True), True)
        coords = approx.ravel()

        # If quadrilateral
        if len(approx) == 4:
            x, y, x2, y2 = coords[0], coords[1], coords[4], coords[5]
            feature_height, feature_width = (y2 - y), (x2 - x)
            # If the size of the quadrilateral found is significant (e.g. not hidden inside text)
            if (feature_width > float(document_width) / 100
                    and feature_height > float(document_width) / 100):
                # If a square (± 5 pixels)
                if abs(feature_height - feature_width) < 5:
                    crop_img = img[y:y + feature_height, x:x + feature_width]
                    # Thresholds the image to binary black and white
                    _, crop_thresh = cv2.threshold(
                        crop_img, 127, 255,
                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                    total = crop_img.shape[0] * crop_img.shape[1]
                    count_black = total - cv2.countNonZero(crop_thresh)
                    if count_black > float(total) * mark_thresh and (
                            check_type == "filled" or check_type == "all"):
                        cv2.drawContours(img, [approx], 0, (0), 2)
                        cv2.putText(img, "Filled", (x, y), font, 1, (0))
                    elif check_type == "empty" or check_type == "all":
                        cv2.drawContours(img, [approx], 0, (0), 2)
                        cv2.putText(img, "Empty", (x, y), font, 0.5, (0))

        if len(approx) > 15:
            # TODO: Do something here if looking for radio buttons.
            continue
    return img
Example no. 4
    def _diff_pages(self, doc, matched):
        ITERATIONS = 6

        m1 = doc
        m2 = matched

        m1 = cv2.cvtColor(m1, cv2.COLOR_BGR2GRAY)
        m2 = cv2.cvtColor(m2, cv2.COLOR_BGR2GRAY)

        cv2.adaptiveThreshold(m1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                              cv2.THRESH_BINARY_INV, 5, 15, m1)
        cv2.adaptiveThreshold(m2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                              cv2.THRESH_BINARY_INV, 5, 15, m2)

        kernel = np.ones((3, 3), np.uint8)

        m1 = cv2.dilate(m1, kernel, iterations=ITERATIONS)
        m2 = cv2.dilate(m2, kernel, iterations=ITERATIONS)

        combined = cv2.bitwise_xor(m1, m2)

        combined = cv2.erode(combined, kernel, iterations=ITERATIONS)

        if cv2.getVersionMajor() == 3:
            # OpenCV 3.4
            _, contours, _ = cv2.findContours(combined, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
        else:
            # OpenCV 4.1.0
            contours, _ = cv2.findContours(combined, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)

        combined = cv2.cvtColor(combined, cv2.COLOR_GRAY2BGR)

        good_contours = []

        for i, k in enumerate(contours):
            _, _, w, h = cv2.boundingRect(k)

            if w > sizes.mmToPix(3, self._DPI) and h > sizes.mmToPix(
                    4, self._DPI):
                combined = cv2.drawContours(combined, contours, i, (0, 0, 255),
                                            cv2.FILLED)
                good_contours.append(k)

        return good_contours
Example no. 5
    def find_contours(self, img, p1, p2):
        self.point1 = (max(0, min(p1[0], p2[0])), max(0, min(p1[1], p2[1])))
        self.point2 = (min(max(p1[0], p2[0]), img.shape[1]),
                       min(max(p1[1], p2[1]), img.shape[0]))
        if int(cv2.getVersionMajor()) >= 4:
            cntrs, _ = cv2.findContours(img, cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, cntrs, _ = cv2.findContours(img, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)
        boxes = [
            cv2.boundingRect(cnt) for cnt in cntrs if self.filter_contour(cnt)
        ]
        boxes.sort(key=lambda b: (b[2], b[3]),
                   reverse=True)  # sort by width/height, larger first
        boxes.sort(key=lambda b: b[0])  # sort by X pos
        return np.array(boxes)
Example no. 6
    def _test_color(self, img):
        b = cv2.bitwise_not(cv2.extractChannel(img, 0))
        g = cv2.bitwise_not(cv2.extractChannel(img, 1))
        r = cv2.bitwise_not(cv2.extractChannel(img, 2))

        cv2.threshold(b, 64, 255, cv2.THRESH_BINARY, dst=b)
        cv2.threshold(g, 64, 255, cv2.THRESH_BINARY, dst=g)
        cv2.threshold(r, 64, 255, cv2.THRESH_BINARY, dst=r)

        sign = cv2.bitwise_or(
            cv2.bitwise_or(cv2.bitwise_xor(b, r), cv2.bitwise_xor(b, g)),
            cv2.bitwise_xor(r, g))

        sign = cv2.GaussianBlur(sign, (11, 11), 0.0)

        cv2.threshold(sign, 16, 255, cv2.THRESH_BINARY, dst=sign)

        if cv2.getVersionMajor() == 3:
            # OpenCV 3.4.0
            _, contours, _ = cv2.findContours(sign, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
        else:
            # OpenCV 4.1.0
            contours, _ = cv2.findContours(sign, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)

        combined = cv2.cvtColor(sign, cv2.COLOR_GRAY2BGR)
        good_contours = []

        for i, k in enumerate(contours):
            x, y, w, h = cv2.boundingRect(k)

            if w > sizes.mmToPix(3, self._DPI) and h > sizes.mmToPix(
                    5, self._DPI):
                combined = cv2.drawContours(combined,
                                            contours,
                                            i, (0, 0, 255),
                                            thickness=cv2.FILLED)
                good_contours.append(k)

        return good_contours
Example no. 7
    def find_Contours(self, lpr_img, green_type):
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))
        lpr_img = clahe.apply(lpr_img)
        if green_type == 'sub':
            lpr_img = cv2.GaussianBlur(lpr_img, (5, 5), 0)
            lpr_img = cv2.erode(lpr_img,
                                np.ones((2, 2), np.uint8),
                                iterations=1)
            lpr_img = cv2.dilate(lpr_img,
                                 np.ones((3, 3), np.uint8),
                                 iterations=2)

        elif green_type in ['a', 'b']:
            lpr_img = cv2.GaussianBlur(lpr_img, (3, 3), 0)
            lpr_img = cv2.erode(lpr_img,
                                np.ones((2, 2), np.uint8),
                                iterations=1)
            lpr_img = cv2.dilate(lpr_img,
                                 np.ones((3, 2), np.uint8),
                                 iterations=1)

        lpr_img = imutils.auto_canny(lpr_img)
        lpr_img = cv2.copyMakeBorder(lpr_img,
                                     2,
                                     2,
                                     2,
                                     2,
                                     cv2.BORDER_CONSTANT,
                                     value=[0, 0, 0])
        lpr_img = cv2.dilate(lpr_img, np.ones((3, 1), np.uint8), iterations=1)

        if int(cv2.getVersionMajor()) >= 4:
            cntrs, _ = cv2.findContours(lpr_img, cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, cntrs, _ = cv2.findContours(lpr_img, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)
        return cntrs
Example no. 8
def check_build_info():
    success = True

    print("OpenCV Version: {}".format(cv2.__version__))
    # flag a mismatch if ANY version component differs (the original 'and'
    # only fired when all three differed at once)
    if (cv2.getVersionMajor() != CURRENT_OPENCV_BUILD_VERSION[0]) or (
            cv2.getVersionMinor() != CURRENT_OPENCV_BUILD_VERSION[1]) or (
            cv2.getVersionRevision() != CURRENT_OPENCV_BUILD_VERSION[2]):
        print("ERROR: OpenCV version is different from the expected one.")
        success = False

    print("Available CPUs: ", cv2.getNumberOfCPUs())
    print("Available threads: ", cv2.getNumThreads())
    if cv2.getNumThreads() < cv2.getNumberOfCPUs():
        print("ERROR: TBB is not enabled.")
        success = False

    cv2.CPU_NEON = 100  # Value taken from OpenCV doc. CPU labels don't work correctly in Python
    print("Cpu NEON support: ", cv2.checkHardwareSupport(cv2.CPU_NEON))
    if not cv2.checkHardwareSupport(cv2.CPU_NEON):
        print("ERROR: NEON is not enabled.")
        success = False

    return success
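CURRENT_OPENCV_BUILD_VERSION is defined outside this snippet; a hypothetical companion definition and entry point, assuming a 3.4.7 build is the expected one:

import sys

CURRENT_OPENCV_BUILD_VERSION = (3, 4, 7)  # hypothetical expected build

if __name__ == '__main__':
    # exit non-zero so CI can fail on a bad build
    sys.exit(0 if check_build_info() else 1)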
Example no. 9
def find_contour_center(img):
    ret, thresh = cv2.threshold(img.copy(), int(255 * 0.5), 255, 0)
    if cv2.getVersionMajor() in [2, 4]:
        contours, hierarchy = cv2.findContours(
            thresh,
            cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, hierarchy = cv2.findContours(
            thresh,
            cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE)
    print(len(contours))
    cx_max = 0
    cx_result, cy_result = 0, 0  # fallback if no valid contour is found
    for cnt in contours:
        M = cv2.moments(cnt)
        if M['m00'] == 0:
            # skip degenerate contours to avoid division by zero
            continue
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        if cx_max < cx:
            cx_max = cx
            cx_result = cx
            cy_result = cy

    return cx_result, cy_result
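Nearly every example here repeats the same branch on cv2.getVersionMajor(). A small reusable wrapper (new code, not taken from any of these snippets) can normalise the findContours return value once instead:

import cv2

def find_contours_compat(image, mode, method):
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 2 and 4
    # return (contours, hierarchy). Normalise to the two-tuple form.
    result = cv2.findContours(image, mode, method)
    return result if len(result) == 2 else result[1:]

# e.g. contours, hierarchy = find_contours_compat(
#     thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)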
Example no. 10
def find_rois(confidence,
              confidence_gt=None,
              confidence_thresh=0.5,
              area_thresh=50,
              padding=50):
    """Find rois

    Comparison with the ground truth happens in the loss; here we only need
    to propose ROIs from the predicted confidence.

    Parameters
    ----------
    confidence : torch.Tensor or numpy.ndarray
        NCHW or HW (numpy.ndarray)
    confidence_gt : optional
        Not used here; the comparison with ground truth is done in the loss.
    confidence_thresh : float, optional
        Binarization threshold for the confidence map, by default 0.5
    area_thresh : int, optional
        Minimum contour area (in pixels) for a proposal, by default 50
    padding : int, optional
        Expand roi by padding, by default 50

    Returns
    -------
    rois : list of torch.Tensor
        rois [[[x1, y1, x2, y2], ..., [x1, y1, x2, y2]],
              ...,
              [[x1, y1, x2, y2], ..., [x1, y1, x2, y2]]]
        len(rois) = n (= batch size)

        - example shape
        rois_list (2,)
        rois_list[0] torch.Size([46, 4])
        rois_list[1] torch.Size([38, 4])
        The first dimension of rois_list is the batch; each element
        contains (the number of rois, (x1, y1, x2, y2)).

    rois_center : list of numpy.ndarray
        rois_center [[x, y]]
        len(rois_center) = n (= batch size)

    """
    if isinstance(confidence, torch.Tensor):
        confidence = confidence.cpu().detach().numpy().copy()
    elif isinstance(confidence, np.ndarray):
        if len(confidence.shape) == 2:
            confidence = confidence[None, None, ...]

    rois = []
    rois_center = []
    for n in range(confidence.shape[0]):
        rois_n = None
        confidence_mask = confidence[n, ...].transpose(1, 2, 0)
        confidence_mask[confidence_mask > 1] = 1
        confidence_mask[confidence_mask < 0] = 0
        confidence_mask *= 255
        confidence_mask = confidence_mask.astype(np.uint8)
        _, confidence_mask = cv2.threshold(confidence_mask,
                                           int(255 * confidence_thresh), 255,
                                           0)

        if cv2.getVersionMajor() in [2, 4]:
            contours, hierarchy = cv2.findContours(confidence_mask,
                                                   cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, hierarchy = cv2.findContours(confidence_mask,
                                                      cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) == 0:
            # set dummy rois; passing None would cause an error in roi_align.
            rois_n = torch.tensor([[0, 0, 0, 0]],
                                  dtype=torch.float32).to('cuda')
            try:
                rois.append(rois_n)
                rois_center.append(np.array([0, 0]))

            except Exception:
                print('rois_n', rois_n)
                raise
            continue

        box = None

        for i, cnt in enumerate(contours):
            try:
                area = cv2.contourArea(cnt)
                if area < area_thresh:
                    continue
                box = cv2.boundingRect(cnt)

            except Exception:
                continue

            box_center = [int(box[0] + box[2] / 2), int(box[1] + box[3] / 2)]
            box = expand_box(box, confidence_mask.shape, padding=padding)
            if rois_n is None:
                rois_n = torch.tensor(
                    [[box[0], box[1], box[0] + box[2], box[1] + box[3]]],
                    dtype=torch.float32).to('cuda')
                rois_n_c = np.array([box_center])

            else:
                rois_n = torch.cat(
                    (rois_n,
                     torch.tensor(
                         [[box[0], box[1], box[0] + box[2], box[1] + box[3]]],
                         dtype=torch.float32).to('cuda')))
                rois_n_c = np.concatenate([rois_n_c, [box_center]])

        if rois_n is None:
            rois_n = torch.tensor([[0, 0, 0, 0]],
                                  dtype=torch.float32).to('cuda')
            rois_n_c = np.array([[0, 0]])
        try:
            rois.append(rois_n)
            rois_center.append(rois_n_c)
        except Exception:
            print('rois_n', rois_n)
            print('rois_n_c', rois_n_c)
            raise
    return (None, None) if rois == [] else (rois, rois_center)
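A hypothetical usage sketch: feeding the proposals to torchvision's roi_align, which the dummy-roi comment above guards against. The feature map, shapes, and spatial_scale=1.0 are illustrative assumptions, not from the original code.

import torch
from torchvision.ops import roi_align

features = torch.randn(2, 16, 256, 256, device='cuda')   # NCHW feature map
confidence = torch.rand(2, 1, 256, 256, device='cuda')   # predicted confidence
rois, rois_center = find_rois(confidence)
# roi_align accepts one Tensor[K, 4] of (x1, y1, x2, y2) boxes per batch element
pooled = roi_align(features, rois, output_size=(32, 32), spatial_scale=1.0)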
Example no. 11
def main():
    cap = cv2.VideoCapture(2)
    while True:

        """ GET IMAGE MASK """
        # load the image, convert it to grayscale, and blur it slightly
        ret, image = cap.read()
        width = image.shape[1]
        height = image.shape[0]
        height_adjust = 0.4
        width_adjust = 0.4
        y = int((height - height * height_adjust) * 0.5) - 25
        h = int(height * height_adjust)
        x = int((width - width * width_adjust) * 0.5) - 23
        w = int(width * width_adjust)

        image = image[y:y + h, x:x + w]
        image = cv2.resize(image, (1920, 1080), interpolation=cv2.INTER_AREA)
        gray = image
        if ret:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        # threshold the image, then perform a series of erosions +
        # dilations to remove any small regions of noise
        thresh = cv2.threshold(gray, 140, 170, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)

        """ FIND CONTOURS """
        output = image.copy()
        contours, hierarchy = [], None
        if cv2.getVersionMajor() in [2, 4]:
            contours, hierarchy = cv2.findContours(
                thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        else:
            # OpenCV 3 case
            im2, contours, hierarchy = cv2.findContours(
                thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        corners = []
        dstTri = []

        if len(contours) != 0:
            # the contours are drawn here
            cv2.drawContours(output, contours, -1, 255, 3)

            # find the biggest area of the contour
            c = max(contours, key=cv2.contourArea)

            x, y, w, h = cv2.boundingRect(c)
            # draw the 'human' contour (in green)
            cv2.rectangle(output, (x, y), (x+w, y+h), (0, 255, 0), 2)

            """ FIND CORNERS"""
            rc = cv2.minAreaRect(c)
            corners = cv2.boxPoints(rc)

            #blue is bottom
            #green is left
            #red is top
            # grey is right

            # cv2.boxPoints returns float32 corners; cv2.circle expects
            # integer pixel coordinates
            cv2.circle(output, (int(corners[0][0]), int(corners[0][1])), 5,
                       (200, 0, 0), 2)
            # bottom right
            cv2.circle(output, (int(corners[1][0]), int(corners[1][1])), 5,
                       (0, 200, 0), 2)
            # top left
            cv2.circle(output, (int(corners[2][0]), int(corners[2][1])), 5,
                       (0, 0, 200), 2)
            # top right
            cv2.circle(output, (int(corners[3][0]), int(corners[3][1])), 5,
                       (100, 100, 100), 2)

            print(corners)
            print(corners[2])
            print(corners[2, 0])
            print(corners[2, 1])
            if corners[1][1] > corners[3][1]:
                print(corners[2][0])
                dstTri = np.array(
                    [[corners[2][0], corners[2][1]],
                     [corners[3][0], corners[3][1]],
                     [corners[1][0], corners[1][1]]]).astype(np.float32)
            else:
                dstTri = np.array(
                    [[corners[1][0], corners[1][1]],
                     [corners[2][0], corners[2][1]],
                     [corners[0][0], corners[0][1]]]).astype(np.float32)

        cv2.imshow("Image", image)    # original
        cv2.imshow("gray", gray)       # grayscale + gaussian blur
        cv2.imshow("thresh", thresh)   # mask
        cv2.imshow("Result", output)  # Bounding box + corners on biggest blob

        """ PROJECTOR STUFF """
        src = cv2.imread('pl-boraham.jpg')

        if src is None:
            print('image not available')
            exit(0)
            
        src = cv2.resize(src, (1920, 1080), interpolation=cv2.INTER_AREA)
        # [Load the image]
        srcTri = np.array([[0, 0], [src.shape[1] - 1, 0], [0, src.shape[0] - 1]]).astype(np.float32)

        name = 'warp'
        # cv2.getAffineTransform needs exactly three point pairs; skip the
        # warp when no contour (and hence no destination triangle) was found
        if len(dstTri) == 3:
            warp_mat = cv2.getAffineTransform(srcTri, dstTri)
            warp_dst = cv2.warpAffine(src, warp_mat,
                                      (src.shape[1], src.shape[0]))
            cv2.imshow(name, warp_dst)
        cv2.waitKey(20)
        
        
        cv2.namedWindow(name)
        #cv2.imshow(name, src)
        cv2.moveWindow(name, 0, 0)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()  # Close all the windows
            break
Example no. 12
def check_find(img, thresholdimg, mark_thresh, check_type):
    """
    Returns an image labelled with all relevant checks, and a json with the details
    :param img:
    :param thresholdimg:
    :param mark_thresh:
    :param check_type:
    :return: image, results
    """
    if cv2.getVersionMajor() in [2, 4]:
        contours, _ = cv2.findContours(thresholdimg, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(thresholdimg, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

    font = cv2.FONT_HERSHEY_TRIPLEX
    document_height, document_width = img.shape[0], img.shape[1]
    mark_thresh = float(mark_thresh.strip('%')) / 100.0

    results = []
    for cnt in contours:
        approx = cv2.approxPolyDP(cnt, 0.1 * cv2.arcLength(cnt, True), True)
        coords = approx.ravel()

        # If quadrilateral
        if len(approx) == 4:
            border_thickness = 0.1
            squareness = 5  # (pixels)
            x, y, x2, y2 = coords[0], coords[1], coords[4], coords[5]
            feature_height, feature_width = (y2 - y), (x2 - x)
            # If the size of the quadrilateral found is significant (e.g. not hidden inside text)
            if (feature_width > float(document_width) / 100
                    and feature_height > float(document_width) / 100):
                border_thickness_y = math.floor(border_thickness *
                                                feature_height)
                border_thickness_x = math.floor(border_thickness *
                                                feature_width)

                # If a square (± 5 pixels)
                if abs(feature_height - feature_width) < squareness:
                    # Is it a duplicate (based on IOU)
                    duplicate = False
                    for result in results:
                        existingCheckBox = result['boundingbox']
                        if (bb_intersection_over_union(
                            [x, y, x2, y2], existingCheckBox) > 0.55):
                            duplicate = True
                    if not duplicate:
                        checkbox = {}
                        checkbox['boundingbox'] = [x, y, x2, y2]
                        crop_img = img[y + border_thickness_y:y +
                                       feature_height - border_thickness_y,
                                       x + border_thickness_x:x +
                                       feature_width - border_thickness_x]
                        # Thresholds the image to binary black and white
                        _, crop_thresh = cv2.threshold(
                            crop_img, 127, 255,
                            cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                        total = crop_img.shape[0] * crop_img.shape[1]
                        count_black = total - cv2.countNonZero(crop_thresh)
                        if count_black > float(total) * mark_thresh and (
                                check_type == "checked"
                                or check_type == "all"):
                            cv2.drawContours(img, [approx], 0, (0), 2)
                            cv2.putText(img, "Filled", (x, y), font, 1, (0))
                            checkbox['type'] = "checked"
                        elif check_type == "empty" or check_type == "all":
                            cv2.drawContours(img, [approx], 0, (0), 2)
                            cv2.putText(img, "Empty", (x, y), font, 0.5, (0))
                            checkbox['type'] = "empty"
                        results.append(checkbox)
        if len(approx) > 15:
            # TODO: Do something here if looking for radio buttons.
            continue
    results = sorted(results, key=lambda checkbox: checkbox['boundingbox'][1])
    results = sorted(results, key=lambda checkbox: checkbox['boundingbox'][0])
    # assign sequential IDs after sorting
    for idx, result in enumerate(results):
        result['id'] = idx
    return img, results
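bb_intersection_over_union is referenced above but defined elsewhere; a standard IoU sketch for [x1, y1, x2, y2] boxes, included for completeness:

def bb_intersection_over_union(box_a, box_b):
    # intersection rectangle
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    if inter == 0:
        return 0.0
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    # intersection over union
    return inter / float(area_a + area_b - inter)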
Example no. 13
    def cal_steer_angle(self, img, vis_img=None, depth_img=None):
        """
        Calculate steering angle for car
        :param img: bgr image
        :return: steering angle (-60 to 60)
        """
        # Init steering angle to 0
        steer_angle = 0

        # Get birdview image
        img_bv = self.bird_view(img)

        # Run semantic segmentation on RGB image
        seg_masks = self.segmentation.get_masks(img)

        if vis_img is not None:
            vis_img = self.segmentation.get_visualization_img(
                vis_img, seg_masks)

        # Get road mask
        road_mask = seg_masks[TrafficObject.ROAD.name]

        # Filter to get only largest white area in road mask
        if cv2.getVersionMajor() in [2, 4]:
            # OpenCV 2, OpenCV 4 case
            contours, hierarchy = cv2.findContours(road_mask, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
        else:
            # OpenCV 3 case
            _, contours, hierarchy = cv2.findContours(road_mask, cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_SIMPLE)

        # Choose largest contour
        largest = max(contours, key=cv2.contourArea) if contours else None

        road_mask[:, :] = 0
        if largest is not None:
            cv2.drawContours(road_mask, [largest], 0, 255, -1)

        # cv2.imshow("Debug", road_mask)
        # cv2.waitKey(1)

        car_mask = seg_masks[TrafficObject.CAR.name]
        # (enum member spelling follows the upstream TrafficObject definition)
        pedestrian_mask = seg_masks[TrafficObject.PERDESTRIAN.name]
        road_mask = road_mask & cv2.bitwise_not(car_mask)

        # Clear car mask if not use it
        if not config.USE_CAR_MASK_SEMANTIC_SEG:
            car_mask[:, :] = 0

        # Use depth image
        if depth_img is not None:
            obstacle_mask = depth_img & road_mask
            car_mask = obstacle_mask | car_mask

            if self.debug_stream:
                self.debug_stream.update_image('obstacle_mask', obstacle_mask)

        # Convert to bird view
        road_mask_bv = self.bird_view(road_mask)

        # ====== Turning =======

        if self.is_turning:
            if self.turning_time_begin + config.TURNING_TIME < time.time():
                self.is_turning = False
            else:
                return self.current_turning_direction * config.TURNING_ANGLE, vis_img
        else:
            interested_area = road_mask_bv[80:180, :]
            lane_area = np.count_nonzero(interested_area)

            if config.SHOW_AREA:
                print("Lane area: {}".format(lane_area))

            if lane_area > config.ROAD_AREA_TO_TURN:
                print("Turning")
                self.is_turning = True
                self.turning_time_begin = time.time()

                self.current_turning_direction = self.current_traffic_sign
                print(self.current_turning_direction)

                # Reset traffic sign
                self.current_traffic_sign = config.SIGN_NO_SIGN

                return self.current_turning_direction * config.TURNING_ANGLE, vis_img

        # ====== If not turning, calculate steering angle using middle point =======

        # TODO: The method to calculate the middle point and angle now is so simple.
        # Research for others in the future
        interested_row = road_mask_bv[int(road_mask_bv.shape[0] / 3 *
                                          2), :].reshape((-1, ))
        white_pixels = np.argwhere(interested_row > 0)

        if white_pixels.size != 0:
            middle_pos = np.mean(white_pixels)
        else:
            middle_pos = 160

        if math.isnan(middle_pos):
            middle_pos = 0

        # ====== Obstacle avoidance =======
        danger_zone, danger_zone_y = self.obstacle_detector.find_danger_zone(
            car_mask, pedestrian_mask)

        # print(danger_zone, danger_zone_y)

        # Avoid obstacles
        if danger_zone != (0, 0):

            # 2 objects
            if danger_zone[0] == -1:
                self.object_avoidance_direction = 0
                # middle_pos = danger_zone[1]

            # single object
            else:
                center_danger_zone = int((danger_zone[0] + danger_zone[1]) / 2)

                count_road_pixels_left = np.count_nonzero(
                    road_mask[danger_zone_y, :center_danger_zone])
                count_road_pixels_right = np.count_nonzero(
                    road_mask[danger_zone_y, center_danger_zone:])

                # obstacle is on the right
                if count_road_pixels_left > count_road_pixels_right:
                    self.object_avoidance_direction = -1
                    self.last_object_time = time.time()
                    # middle_pos = danger_zone[0]
                    print("OBSTACLE: RIGHT")
                # left
                elif count_road_pixels_left < count_road_pixels_right:
                    self.object_avoidance_direction = 1
                    self.last_object_time = time.time()
                    # middle_pos = danger_zone[1]
                    print("OBSTACLE: LEFT")

        # Object avoidance
        if self.last_object_time > (time.time() -
                                    config.OBSTACLE_AVOIDANCE_TIME):
            middle_pos += (config.OBSTACLE_AVOIDANCE_OFFSET *
                           self.object_avoidance_direction)
            print("Obstacle avoidance direction: " +
                  str(self.object_avoidance_direction))
        elif self.last_object_time < (time.time() -
                                      config.OBSTACLE_AVOIDANCE_TIME - 1):
            self.object_avoidance_direction = 0
            # print("Obstacle was over")

        if self.debug_stream:
            half_car_width = config.CAR_WIDTH // 2
            cv2.line(img_bv, (int(middle_pos), self.h // 2),
                     (self.w // 2, self.h), (255, 0, 0), 2)
            cv2.line(img_bv, (int(middle_pos) + half_car_width, self.h // 2),
                     (self.w // 2 + half_car_width, self.h), (255, 0, 255), 3)
            cv2.line(img_bv, (int(middle_pos) - half_car_width, self.h // 2),
                     (self.w // 2 - half_car_width, self.h), (255, 0, 255), 3)
            self.debug_stream.update_image('car_controlling', img_bv)

        # Add offset to middle pos
        middle_pos += config.MIDDLE_POS_OFFSET

        # Distance between MiddlePos and CarPos
        distance_x = middle_pos - self.w / 2
        distance_y = self.h - self.h / 3 * 2

        # Angle to middle position
        steer_angle = math.atan(
            float(distance_x) / distance_y) * 180 / math.pi * 1.2

        # QIK MATH
        # steer_angle = ((middle_pos - 160) / 160) * 60

        return steer_angle, vis_img