Example 1
def detect_by_gf(origin,
                 isdebug=False):
    if origin is None:
        return None
    # Default Size
    h,w,c = origin.shape
    size = 200.0
    # Resize
    img = cv2.resize(origin,(int(w*size/h),int(size)))   
    # Extract Good Features
    corners = refinedGoodFeatures(origin,img,
                                  model='LP')
    mask = checkFeatures(img,corners,True)
    # Find Candidate
    bboxes = findBBox(img,mask,
                      model='LP',
                      debug=True)
    # Resize
    if bboxes is not None:
        bboxes = resizeBBoxes(bboxes,h/size)
    # Check Result
    if isdebug and bboxes is not None:
        drawBBox(origin,bboxes,debug=True)
    # Crop Rois
    rois = BBoxes2ROIs(origin,bboxes)
    if isdebug and rois is not None:
        for i in range(len(rois)):
            showResult("cropped",rois[i])
            
    return bboxes,rois
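Every snippet in this collection calls a showResult debug helper that is not included here. A minimal sketch of what it presumably does, assuming it simply displays an image in an OpenCV window (the clipping of float masks is my own addition):

import cv2
import numpy as np

def showResult(title, img):
    # Hypothetical stand-in for the project's showResult helper:
    # clip/convert non-uint8 images for display, then show and wait for a key.
    if img.dtype != np.uint8:
        img = np.clip(img, 0, 255).astype(np.uint8)
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyWindow(title)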
Example 2
def colormask(image,
              isday=True,
              isdebug=False):
    # blue mask + yellow mask
    hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
    h,s,v = cv2.split(hsv)
    sat = (s.copy()).astype('float')
    val = (v.copy()).astype('float')
    sat /= 255
    val /= 255
    #
    blue = h.copy()
    yellow = h.copy()
    if isday:
        blue = np.where((h<100)|(h>135)|(v<40)|(s<40),0,255)
        yellow = np.where((h<20)|(h>40)|(v<40)|(s<40),0,255)
    else:
        blue = np.where((h<105)|(h>135),0,255)
        yellow = np.where((h<20)|(h>40),0,255)        
    #
    hue = np.zeros(image.shape[:2])
    hue[hue>=0] = 0.2
    hue[blue>0] = 0.9
    hue[yellow>0] = 0.7
    if isday:
        mask=(hue*sat*val)**2
    else:
        mask=(hue*sat)**3
    #
    if isdebug:
        showResult("colormask:mask",mask*255)
        
    return mask
Example 3
def find_possible_chars(thr, isdebug=False):
    #
    tmp = thr.copy()
    img_contour, contours, npaHierarchy = cv2.findContours(
        tmp, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # find all contours
    height, width = thr.shape
    img_contour = np.zeros((height, width, 3), np.uint8)
    #
    filtered = []  # this will be the return value
    count = 0
    for i in range(0, len(contours)):  # for each contour

        if isdebug:
            cv2.drawContours(img_contour, contours, i, SCALAR_WHITE)

        contour = Contour(contours[i])

        if contour.checkIfPossibleChar(
        ):  # if contour is a possible char, note this does not compare to other chars (yet) . . .
            count += 1  # increment count of possible chars
            filtered.append(contour)  # and add to list of possible chars

    if isdebug:
        print "\nstep 2 - contours = " + str(
            len(contours))  # 2362 with MCLRNF1 image
        print "step 2 - chars = " + str(count)  # 131 with MCLRNF1 image
        showResult("contours", img_contour)
    return filtered
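Note that cv2.findContours returns three values in OpenCV 3.x (as used here) but only two in OpenCV 2.4 and 4.x (as in Example 7 below). If the snippets must run against a different OpenCV version, a small compatibility wrapper is one option (an assumption, not part of the original code):

import cv2

def find_contours_compat(binary, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE):
    # OpenCV 3.x returns (image, contours, hierarchy); 2.4 and 4.x return (contours, hierarchy).
    result = cv2.findContours(binary, mode, method)
    return result[-2], result[-1]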
Example 4
def tophatblackhat(gray):
    gray = cv2.GaussianBlur(gray,(3,3),0)
    rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 3))
    tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)
    blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
    showResult("tophat",tophat)
    showResult("blackhat",blackhat)
Example 5
def removeshadow(img):
    #
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = 255 - gray
    hsv[:, :, 2] = gray
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    #
    rgb_planes = cv2.split(bgr)

    result_planes = []
    result_norm_planes = []
    for plane in rgb_planes:
        dilated_img = cv2.dilate(plane, np.ones((7, 7), np.uint8))
        bg_img = cv2.medianBlur(dilated_img, 21)
        diff_img = 255 - cv2.absdiff(plane, bg_img)
        norm_img = diff_img.copy()
        norm_img = cv2.normalize(diff_img,
                                 norm_img,
                                 alpha=0,
                                 beta=255,
                                 norm_type=cv2.NORM_MINMAX,
                                 dtype=cv2.CV_8UC1)
        result_planes.append(diff_img)
        result_norm_planes.append(norm_img)

    result = cv2.merge(result_planes)
    result_norm = cv2.merge(result_norm_planes)

    showResult("result", result)
    showResult("result_norm", result_norm)
    return result, result_norm
Example 6
def SimpleRecognizePlate(image):
    t0 = time.time()
    bboxes, images = detect_by_probability(
        image)  #detect.detectPlateRough(image)
    if images is None:
        return
    for image in images:
        #image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        image = cv2.resize(image, (136, 36))
        image = kojy_gray(image)
        #image = maximizeContrast(255 - image)

        image_gray = fm.findContoursAndDrawBoundingBox(image)
        showResult("plate-colr", image)
        showResult("plate-gray,", image_gray)

        blocks, res, confidence = segmentation.slidingWindowsEval(image_gray)
        #for i in range(len(blocks)):
        #    showResult("plate-gray,",blocks[i])
        if confidence > 4.5:
            print "车牌:", res, "置信度:", confidence
        else:
            print "不确定的车牌:", res, "置信度:", confidence

    print(time.time() - t0, "s")
Example 7
 def setcontours(self,isdebug=False):
     #self.img_contour, self.contours, npaHierarchy = cv2.findContours(self.compose, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)   # find all contours        
     self.contours, npaHierarchy = cv2.findContours(self.compose, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)   # find all contours        
     self.img_contour = self.compose
     if isdebug:
         cv2.drawContours(self.img_contour, self.contours, -1, SCALAR_WHITE, -1)
         print "\nstep 2 - contours = " + str(len(self.contours))
         showResult("contours",self.img_contour)
Example 8
def mask2plates(img, finalmask, isdebug=False):
    #showResult("masktest",finalmask)
    #ret,binary = cv2.threshold(finalmask,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\
    binary = compositeThreshold(finalmask, mode='otsu')
    closing = close(binary)
    if isdebug:
        showResult("mask2plates:masktest", closing)
    # Find Candidate
    return findBBox(img, closing, isdebug=isdebug)
Example 9
def compositeThreshold(gray, mode='com'):
    if mode == 'otsu':
        otsu = threshold_otsu(gray)
        otsu_bin = gray > otsu
        otsu_bin = otsu_bin.astype(np.uint8) * 255
        return otsu_bin
    elif mode == 'yen':
        yen = threshold_yen(gray)
        yen_bin = gray > yen
        yen_bin = yen_bin.astype(np.uint8) * 255
        return yen_bin
    elif mode == 'li':
        li = threshold_li(gray)
        li_bin = gray > li
        li_bin = li_bin.astype(np.uint8) * 255
        return li_bin
    elif mode == 'niblack':
        niblack = threshold_niblack(gray, window_size=13, k=0.8)
        niblack_bin = gray > niblack
        niblack_bin = niblack_bin.astype(np.uint8) * 255
        return niblack_bin
    elif mode == 'sauvola':
        sauvola = threshold_sauvola(gray, window_size=13)
        sauvola_bin = gray > sauvola
        sauvola_bin = sauvola_bin.astype(np.uint8) * 255
        return sauvola_bin
    elif mode == 'com':
        li = threshold_li(gray)
        li_bin = gray > li
        li_bin = li_bin.astype(np.uint8) * 255
        otsu = threshold_otsu(gray)
        otsu_bin = gray > otsu
        otsu_bin = otsu_bin.astype(np.uint8) * 255
        yen = threshold_yen(gray)
        yen_bin = gray > yen
        yen_bin = yen_bin.astype(np.uint8) * 255
        return cv2.min(cv2.min(otsu_bin, li_bin), yen_bin)
    elif mode == "niblack-multi":
        thr = np.zeros((gray.shape), dtype=np.uint8)
        thr[thr >= 0] = 255
        for k in np.linspace(-0.8, 0.2, 5):  #(-1.8,0.2,5)
            thresh_niblack = threshold_niblack(gray, window_size=25, k=k)
            binary_niblack = gray > thresh_niblack
            binary_niblack = binary_niblack.astype(np.uint8) * 255
            showResult("binary_niblack", binary_niblack)
            thr = cv2.min(thr, binary_niblack)
        return thr
    else:
        sauvola = threshold_sauvola(gray, window_size=25, k=0.25)
        sauvola_bin = gray > sauvola
        sauvola_bin = sauvola_bin.astype(np.uint8) * 255
        niblack = threshold_niblack(gray, window_size=25, k=0.25)
        niblack_bin = gray > niblack
        niblack_bin = niblack_bin.astype(np.uint8) * 255
        return cv2.max(sauvola_bin, niblack_bin)
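compositeThreshold relies on the scikit-image threshold functions being in scope (Niblack and Sauvola need scikit-image 0.13 or newer). A minimal usage sketch with the assumed imports; the input path is illustrative:

import cv2
from skimage.filters import (threshold_otsu, threshold_yen, threshold_li,
                             threshold_niblack, threshold_sauvola)

gray = cv2.imread("plate.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical input image
binary = compositeThreshold(gray, mode='com')         # pixel-wise min of the Otsu, Li and Yen masks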
Example 10
def drawChars(img, title, contours, colr=SCALAR_WHITE, isdebug=False):
    img_contours = np.zeros((img.shape), np.uint8)

    contours_ = []
    for contour in contours:
        contours_.append(contour.contour)

    cv2.drawContours(img_contours, contours_, -1, colr)
    if isdebug:
        showResult(title, img_contours)
    return img_contours
Example 11
def colormask(image, isday=True, tgtcolr='Default', isdebug=False):
    #
    #image = NormalizeT(image)
    #showResult("normalized",image)
    # blue mask + yellow mask
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    sat = (s.copy()).astype('float')
    val = (v.copy()).astype('float')
    sat /= 255
    val /= 255
    #
    hue = np.zeros(image.shape[:2])
    hue[hue >= 0] = 0.2
    #
    blue1 = h.copy()
    blue2 = h.copy()
    yellow = h.copy()
    #
    if isday:
        blue1 = np.where((h < 105) | (h > 135) | (v < 40) | (s < 40), 0, 255)
        blue2 = np.where((h < 100) | (h > 140) | (v < 30) | (s < 30), 0, 255)
        yellow = np.where((h < 20) | (h > 40) | (v < 40) | (s < 40), 0, 255)
    else:
        blue1 = np.where((h < 105) | (h > 135), 0, 255)
        blue2 = np.where((h < 100) | (h > 140), 0, 255)
        yellow = np.where((h < 20) | (h > 40), 0, 255)
    #
    for case in switch(tgtcolr):
        if case('Blue'):
            hue[blue2 > 0] = 0.5
            hue[blue1 > 0] = 1.0
            hue[yellow > 0] = 0.1
            break
        if case('Yellow'):
            hue[yellow > 0] = 1.0
            hue[blue2 > 0] = 0.1
            hue[blue1 > 0] = 0.15
            break
        if case('Default'):
            hue[blue2 > 0] = 0.4
            hue[blue1 > 0] = 0.9
            hue[yellow > 0] = 0.7
            break
    #
    if isday:
        mask = (hue * sat * val)**2  #(hue*sat*val)**2
    else:
        mask = (hue * sat)**2
    #
    if isdebug:
        showResult("colormask:mask", mask)

    return mask
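The "for case in switch(tgtcolr)" construct above assumes a small switch helper class that is not shown in these snippets. A minimal recipe that satisfies this usage (an assumption about the original helper):

class switch(object):
    # Emulates a switch/case statement: iterate once over self.match,
    # and let case(value) report whether the switched value matches.
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        # Yield the match method exactly once, so the for-loop body runs once.
        yield self.match

    def match(self, *args):
        if self.fall or not args:
            return True  # fall-through or default case
        if self.value in args:
            self.fall = True
            return True
        return False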
Example 12
def detect_by_seg_gf(origin,
                     isdebug=False):
    if origin is None:
        return None
    # Default Size
    h,w,c = origin.shape
    size = 200.0
    #origin = opencv2skimage(origin)
    # Resize
    img = cv2.resize(origin,(int(w*size/h),int(size)))
    # Blur
    blur = cv2.GaussianBlur(img,(5,5),3)
    # Equalization Hist
    #origin = equalizehist(origin)
    
    # Extract Good Features
    corners = refinedGoodFeatures(origin,img,
                                  model='LP')
    corners_,handwrite =  refinedCorners(img,corners,False)
    checkFeatures(img,corners,True)
    # Opencv2Skimage
    skimg = cv2.cvtColor(blur, cv2.COLOR_BGR2RGB)
    # Segmentation
    out,labels = seg(skimg,Debug=False)
    # Eval Label
    labels = refineLabels(out,labels,corners_,howmany=20)
    # Show Result
    out = drawLabels(skimg,labels,Debug=isdebug)
    if out is None:
        return None
    changebgcolr(out,labels)
    #showResult("labelout",skimage2opencv(out))
    # Find Candidate 
    bboxes = findBBox(img,out,
                      model='LP',
                      debug=isdebug)
    # Resize
    if bboxes is not None:
        bboxes = resizeBBoxes(bboxes,h/size)
    # Check Candidate
    if isdebug and bboxes is not None:
        drawBBox(origin,bboxes,debug=isdebug)
    # Crop Rois
    rois = BBoxes2ROIs(origin,bboxes)
    if isdebug and rois is not None:
        for i in range(len(rois)):
            showResult("cropped",rois[i])
    '''
    if isdebug:
        #labels2boundaries(labels)
        contours = labels2contours(labels)
        drawBBox(img,contours,debug=True)
    '''
    return bboxes,rois
Example 13
def mkfinalmask(image, gf_image=None, isday=True, isdebug=False):
    colrmask = colormask(image, isday)
    regmask = regionmask(image)
    mask = colrmask * regmask
    if gf_image is not None:
        gfmask = featuremask(gf_image)
        mask *= gfmask

    mask = (mask * 255).astype('uint8')
    if isdebug:
        showResult("mkfinalmask:mask", mask * 255)
    return mask
Example 14
 def finalize(self, isdebug=False):
     strings_ = self.makeup(isdebug=isdebug)
     # need ocr or not
     strings_ = VIN.ocrchecking(self.bgr, strings_, False)
     #
     if strings_ is not None:
         marked = String.mark(self.bgr, strings_)
         if isdebug:
             showResult("marked", marked)
             #drawChars((self.height,self.width),"final",string.getitems(),isdebug=isdebug)
             #print string.result
         return True, strings_[0].confidence, marked
     else:
         return False, 0.0, None
Example 15
def mkfinalmasks(image, gf_image=None, isday=True, isdebug=False):
    masks = []
    regmask = regionmask(image)
    for i in range(2):
        colrmask = colormask(image, isday, tgtcolr=colrs[i], isdebug=isdebug)
        mask = colrmask * regmask
        if gf_image is not None:
            gfmask = featuremask(gf_image)
            mask *= gfmask
        mask = (mask * 255).astype('uint8')
        if isdebug:
            showResult("mkfinalmask:mask", mask)
        masks.append(mask)
    return masks
Example 16
 def makesegments(image,string):
     string.ROI = String.extractROI(image,string,False)
     bgr = string.ROI.cropped
     chars =  string.chars
     FirstIndex = 0
     LastIndex = string.charcount - 1
     h,w = bgr.shape[:2]
     delta = w - (chars[LastIndex].brX + chars[LastIndex].brW - chars[FirstIndex].brX)
     first = chars[0].brX
     for char in chars:
         x = char.brX - first + delta
         segment = bgr[1:string.charheight,x:x+string.charwidth,:]
         string.segments.append(segment)
         showResult("segment",segment)
Example 17
def recognizeLP(gray):
    t0 = time.time()
    if gray is None:
        return

    gray = cv2.resize(gray, (136, 36))

    blocks, res, confidence = segmentation.slidingWindowsEval(gray)
    for i in range(len(blocks)):
        showResult("plate-gray,", blocks[i])
    if confidence > 4.5:
        print "车牌:", res, "置信度:", confidence
    else:
        print "不确定的车牌:", res, "置信度:", confidence

    print(time.time() - t0, "s")

    return blocks, res, confidence
Example 18
def detect_by_cascade(origin, resize_h=720, en_scale=1.06, isdebug=False):
    if origin is None:
        return None
    # Default Size
    height, width, channels = origin.shape
    # Resize
    resized = cv2.resize(origin, (int(width * resize_h / height), resize_h))
    # Colr2Gray
    gray = cv2.cvtColor(resized, cv2.COLOR_RGB2GRAY)
    # Detect by Cascade
    watches = watch_cascade.detectMultiScale(gray,
                                             en_scale,
                                             1,
                                             minSize=(36, 9))

    cropped_images = []
    bboxes = []
    for (x, y, w, h) in watches:
        xmin = x - w * 0.1
        ymin = y - h * 0.6
        xmin = xmin if xmin >= 0 else 0
        ymin = ymin if ymin >= 0 else 0
        xmax = xmin + 1.2 * w
        ymax = ymin + 1.1 * h
        bboxes.append(
            np.array([[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]))

        cropped = cropped_from_image(
            gray, (int(xmin), int(ymin), int(1.2 * w), int(1.1 * h)))
        cropped_images.append(cropped)
    # Resize
    if bboxes is not None:
        bboxes = resizeBBoxes(bboxes, height / float(resize_h))

    # Check Result
    if isdebug and bboxes is not None:
        drawBBox(origin, bboxes, debug=True)
    # Crop Rois
    rois = BBoxes2ROIs(origin, bboxes)
    if isdebug and rois is not None:
        for i in range(len(rois)):
            showResult("cropped", rois[i])

    return bboxes, rois  #cropped_images
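detect_by_cascade assumes a watch_cascade classifier loaded elsewhere. A plausible setup, with an illustrative path to a trained licence-plate cascade model:

import cv2

# Hypothetical model path; any OpenCV Haar/LBP cascade XML file works here.
watch_cascade = cv2.CascadeClassifier("model/cascade.xml")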
Example 19
def detect_by_probability(origin,
                 isdebug=False):
    if origin is None:
        return None
    # Default Size
    h,w,c = origin.shape
    size = 200.0
    # Resize
    img = cv2.resize(origin,(int(w*size/h),int(size)))
    #showResult("img",img)
    #  
    if dayornight(img):
        # Extract Good Features
        corners = refinedGoodFeatures(origin,img)
        mask = checkFeatures(img,corners,False)
        closing=close(mask)
        refined_gfmask = refine_gfimage(img,closing)
        #showResult("refined_gfmask",refined_gfmask)
        finalmask = mkfinalmask(img,refined_gfmask,isday=True)
    else:
        finalmask = mkfinalmask(img,None,isday=False)
    #
    #showResult("masktest",finalmask)
    #ret,binary = cv2.threshold(finalmask,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    binary = compositeThreshold(finalmask,mode='otsu')
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))
    closing=cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    if isdebug:
        showResult("masktest",closing)
    # Find Candidate
    bboxes = findBBox(img,closing,isdebug=True)
    # Resize
    if bboxes is not None:
        bboxes = resizeBBoxes(bboxes,h/size)
    # Check Result
    if isdebug and bboxes is not None:
        drawBBox(origin,bboxes,debug=True)
    # Crop Rois
    rois = BBoxes2ROIs(origin,bboxes)
    if isdebug and rois is not None:
        for i in range(len(rois)):
            showResult("cropped",rois[i])
            
    return bboxes,rois
Example 20
def drawChars(shape, title, chars, colr=SCALAR_WHITE, isdebug=False):
    h, w = shape
    ratio = 1
    vis = np.zeros((h, w * ratio, 3), np.uint8)
    img_contours = np.zeros((shape), np.uint8)

    for i, char in enumerate(chars):
        cv2.drawContours(img_contours, [char.contour], -1, colr, -1)
        if isdebug:
            [x, y] = char.brX, char.brY
            cv2.putText(vis, str(i), (x * ratio, y),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 128, 128), 1,
                        cv2.LINE_AA)
            contour = char.contour.copy()
            contour[:, :, 0] *= ratio
            cv2.drawContours(vis, [contour], -1, colr, -1)
    if isdebug:
        showResult(title, vis)
    return img_contours
Example 21
def refinedCorners(img,
                   corners,
                   isdebug=False):
    #http://scikit-image.org/docs/dev/auto_examples/edges/plot_skeleton.html
    mark = img.copy()
    mark[mark>=0] = 0
    #img = cv2.drawKeypoints(img,corners)    
    for i in corners:
        x,y = i.ravel()
        cv2.circle(mark,(int(x),int(y)),1,(255,255,255),5)
    dilate = cv2.dilate(mark[:,:,0],(3,3),iterations=1)
    erode = cv2.dilate(dilate,(3,3),iterations=1)
    closing = morphological(erode,cv2.MORPH_CLOSE)
    erode = cv2.dilate(closing,(3,3),iterations=1)
    #erode = cv2.erode(closing,(3,3),iterations=1)
    #ret,thr = cv2.threshold(erode,0,255,cv2.THRESH_BINARY)
    binary = erode#mark[:,:,0]
    binary[binary>0]=1
    skeleton = img_as_ubyte(morphology.skeletonize_3d(binary))#skeletonize(binary))
    skeleton[skeleton > 0] = 255
    dilate = cv2.dilate(skeleton,(3,3))
    corners_ = np.transpose(np.nonzero(skeleton))
    cv_corners = []
    for corner in corners_:
        cv_corners.append([np.array([corner[1],corner[0]])])
    if isdebug:
        for pt in cv_corners:
            cv2.circle(img,(pt[0][0],pt[0][1]),1,(255,255,255),1)
        showResult("mark",mark)
        showResult("skeleton",skeleton)
        showResult("img",img)

    return cv_corners,mark[:,:,0]
Example 22
    def first_filtering(self, isdebug=False):

        img_contour, contours, npaHierarchy = cv2.findContours(
            self.thr, cv2.RETR_LIST,
            cv2.CHAIN_APPROX_SIMPLE)  # find all contours
        height, width = self.thr.shape
        img_contour = np.zeros((height, width, 3), np.uint8)

        cheights = []
        cwidths = []
        for contour in contours:

            if isdebug:
                cv2.drawContours(img_contour, [contour], -1, SCALAR_WHITE)

            [x, y, w, h] = cv2.boundingRect(contour)
            braRatio = float(w) / float(h)  #aspect ratio

            if (w*h > MIN_PIXEL_AREA and\
                w > MIN_PIXEL_WIDTH and\
                h > self.height*0.4 and\
                0.04 < braRatio and\
                braRatio < 1.25):
                self.contours.append(contour)
                cwidths.append(w)
                cheights.append(h)

        if len(cwidths) < 2:
            return None

        self.charheight = statistics.median_high(cheights)
        self.charwidth = statistics.median_high(cwidths)
        if isdebug:
            print("\nfiltering step 1 - contours = " + str(len(contours)))
            showResult("contours", img_contour)
        return self.contours
Example 23
    def process(self, roi=None, mode="Blue", isdebug=False):
        self.initialize()
        if roi is not None:
            self.originheight, self.originwidth = roi.shape[:2]
            self.bgr = cv2.resize(roi, (self.width, self.height),
                                  interpolation=cv2.INTER_CUBIC)
            self.preprocess(mode=mode)
        #
        if isdebug:
            self.showAll()

        # filtering contours
        if self.filtering_contours(isdebug=isdebug) is False:
            return 0.0
        #
        #https://namkeenman.wordpress.com/2015/12/18/open-cv-determine-angle-of-rotatedrect-minarearect/
        #https://docs.opencv.org/2.4/modules/core/doc/basic_structures.html?highlight=rotatedrect#RotatedRect::RotatedRect()
        #
        # calculate x range of LP
        self.estimateLP(isdebug=isdebug)
        if isdebug:
            tmp = self.bgr.copy()
            cv2.line(tmp, (self.charleftmost, self.height // 2),
                     (self.charrightmost, self.height // 2), (34, 222, 0), 2)
            showResult("test", tmp)
        # crop and warp, then segment
        self.correctImg(isdebug=isdebug)
        # detect colr
        #self.detectcolr()
        # makecompose
        self.makeCompose()
        # makesegments
        self.makesegments(self.compose)
        # calculate confidence
        self.calculateconfidence()
        return self.confidence
Example 24
 def showAll(self):
     showResult("img", self.bgr)
     #showResult("gray",self.gray)
     #showResult("sobel",self.sobel)
     #showResult("entropy",self.entropy)
     showResult("DoG", self.DoG)
     #showResult("garbor",self.garbor)
     #showResult("laplacian",self.laplacian)
     #showResult("thr",self.lthr)
     #showResult("contour",self.contour)
     showResult("compose", self.compose)
Example 25
def detect_by_contour(origin, isdebug=False):
    # Initialize
    height, width, numChannels = origin.shape
    img_gray = np.zeros((height, width, 1), np.uint8)
    img_thr = np.zeros((height, width, 1), np.uint8)
    img_contour = np.zeros((height, width, 3), np.uint8)
    # Grayscale
    img_gray, img_thr = preprocess(origin)
    if isdebug:
        showResult("img_gray", img_gray)
        showResult("img_thr", img_thr)
        #showResult("Test",cv2.Canny(img_gray,50,200))
    # First Filtering(Contours2Chars)
    chars = find_possible_chars(img_thr)
    if isdebug:
        print "step 2 - the numbder of suspicious chars(roughly filtered contours) = " + str(
            len(chars))
        drawChars(img_contour, "first-filtering", chars, isdebug=True)
    # Second Filtering(Chars2Strings)
    strings = Contour.findListOfListsOfMatchingChars(chars)
    if isdebug:  # show steps #######################################################
        print "step 3 - strings.Count = " + str(
            len(strings))  # 13 with MCLRNF1 image
        img_contour = np.zeros((height, width, 3), np.uint8)

        for string in strings:
            (b, g, r) = (random.randint(0, 255), random.randint(0, 255),
                         random.randint(0, 255))
            img_contour = drawChars(img_contour, "second-filtering", string,
                                    (b, g, r))
    # Third Filtering(String2ROIs)
    ROIs = []
    bboxes = []
    for string in strings:  # for each group of matching chars
        roi = extractROI(origin, string)  # attempt to extract plate

        if roi.imgPlate is not None:  # if plate was found
            ROIs.append(roi)  # add to list of possible plates
            bbox = cv2.boxPoints(roi.rrLocationOfPlateInScene)
            ibbox = bbox.astype(int)
            bboxes.append(ibbox)

    #
    print "\n" + str(
        len(ROIs)) + " possible plates found"  # 13 with MCLRNF1 image

    if isdebug and len(ROIs) != 0:  # show steps #######################################################

        for i in range(len(ROIs)):
            p2fRectPoints = cv2.boxPoints(ROIs[i].rrLocationOfPlateInScene)

            cv2.line(origin, tuple(p2fRectPoints[0]), tuple(p2fRectPoints[1]),
                     SCALAR_RED, 2)
            cv2.line(origin, tuple(p2fRectPoints[1]), tuple(p2fRectPoints[2]),
                     SCALAR_RED, 2)
            cv2.line(origin, tuple(p2fRectPoints[2]), tuple(p2fRectPoints[3]),
                     SCALAR_RED, 2)
            cv2.line(origin, tuple(p2fRectPoints[3]), tuple(p2fRectPoints[0]),
                     SCALAR_RED, 2)
            showResult("roi", ROIs[i].imgPlate)

        showResult("result", origin)

    return bboxes
Example 26
 def drawChars(self, title, colr=SCALAR_WHITE, isdebug=False):
     img_contours = np.zeros((self.bgr.shape), np.uint8)
     cv2.drawContours(img_contours, self.contours, -1, colr, -1)
     if isdebug:
         showResult(title, img_contours)
     return img_contours
Example 27
 def debug(self, img, xs):
     tmp = img.copy()
     for x in xs:
         cv2.circle(tmp, (int(x), self.height // 2), 2, (255, 0, 0), 2)
     showResult("debug", tmp)
 def showAll(self):
     showResult("roi", self.bgr)
     showResult("gray", self.gray)
     showResult("laplacian", self.laplacian)
     showResult("DoG", self.DoG)
     showResult("thr", self.thr)
Example 29
 def showall(self):
     showResult("laplacian", self.laplacian)
     showResult("lthr", self.lthr)
     #showResult("DoG",self.DoG)
     showResult("tophat", self.tophat)
     showResult("compose", self.compose)
Example 30
def labels2boundaries(labels):
    #http://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.find_boundaries
    cleared = clear_border(labels)
    boundary = find_boundaries(cleared, mode='inner').astype(np.uint8)
    boundary[boundary != 0] = 255
    showResult("boundaries",boundary)