Example no. 1
 def apply_skin_mask(self, frame):
     # convert from BGR to HSV
     hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
     # detect skin using the hand histogram
     self.skin_mask = cv2.calcBackProject([hsv], [0, 1], self.hand_histogram, [0, 180, 0, 256], 1)
     # create an elliptical kernel (11 works best in my case)
     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
     cv2.filter2D(self.skin_mask, -1, kernel, self.skin_mask)
     # Apply a Gaussian filter to give a much better result
     cv2.GaussianBlur(self.skin_mask, (3, 3), 0, self.skin_mask)
     # change the threshold to suit the brightness (20-30 gave me best results so far)
     _, thresh = cv2.threshold(self.skin_mask, 20, 255, 0)
     thresh = cv2.merge((thresh, thresh, thresh))
     # Mask the hand from the original frame
     self.skin_mask = cv2.bitwise_and(frame, thresh)
     # Apply a Gaussian filter to give a much cleaner result
     cv2.GaussianBlur(self.skin_mask, (5, 5), 0, self.skin_mask)
     # remove faulty skin patches (opening with a 31x31 kernel)
     cv2.morphologyEx(self.skin_mask, cv2.MORPH_OPEN, np.ones((31, 31), np.uint8), self.skin_mask, iterations=5)
     # reduce black holes in the hand (closing with a 9x9 kernel)
     cv2.morphologyEx(self.skin_mask, cv2.MORPH_CLOSE, np.ones((9, 9), np.uint8), self.skin_mask, iterations=5)
     # Show skin detection result if DEBUGGING
     if self.DEBUGGING:
         cv2.imshow('SKIN', self.skin_mask)
     return self.skin_mask
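
Note: the method above assumes self.hand_histogram was prepared beforehand from a sample skin patch. A minimal sketch of how such a histogram is commonly built for calcBackProject; the hand_patch crop and the bin counts are assumptions, not taken from the original class:

import cv2
import numpy as np

def build_hand_histogram(hand_patch):
    # hand_patch: a BGR crop known to contain only skin
    hsv = cv2.cvtColor(hand_patch, cv2.COLOR_BGR2HSV)
    # 2D hue/saturation histogram, matching the [0, 180, 0, 256] ranges above
    hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    # normalize so the back-projection values fall in 0..255
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    return hist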
def GetGlints(gray,thr):
    tempResultImg = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR) #used to draw temporary results

    props = RegionProps()
    val,binI = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY) #Using non inverted binary image
    
    #Combining opening and dilating seems to work best, but is it OK that other glints are visible too?
    st7 = cv2.getStructuringElement(cv2.MORPH_CROSS,(7,7))
    st9 = cv2.getStructuringElement(cv2.MORPH_CROSS,(9,9))
    
    binI= cv2.morphologyEx(binI, cv2.MORPH_OPEN, st7)
    binI = cv2.morphologyEx(binI, cv2.MORPH_DILATE, st9, iterations=2)
    
    cv2.imshow("ThresholdGlints",binI)
    #Calculate blobs
    sliderVals = getSliderVals() #Getting slider values
    contours, hierarchy = cv2.findContours(binI, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) #Finding contours/candidates for glint blobs
    glints = []
    glintEllipses = []
    for cnt in contours:
        values = props.CalcContourProperties(cnt,['Area','Length','Centroid','Extend','ConvexHull']) #BUG - Add cnt.astype('int') in Windows
        if values['Area'] < sliderVals['maxSizeGlints'] and values['Area'] > sliderVals['minSizeGlints']:
            glints.append(values)
            centroid = (int(values['Centroid'][0]),int(values['Centroid'][1]))
            cv2.circle(tempResultImg,centroid, 2, (0,0,255),4)
            glintEllipses.append(cv2.fitEllipse(cnt))
    cv2.imshow("TempResults",tempResultImg)
    return glintEllipses
 def applyMorphologicalCleaning(self, image):
     """
     Applies a variety of morphological operations to improve the detection
     of worms in the image.
     Takes 0.030 s on MUSSORGSKY for a typical frame region
     Takes 0.030 s in MATLAB too
     """
     # start with worm == 1
     image = image.copy()
     # remove objects at the edge (worm == 1); clear_border returns a new
     # array rather than modifying in place, so assign the result back
     image = segmentation.clear_border(image)
     # fix defects in the thresholding by closing with a worm-width disk
     # worm == 1
     wormSE = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                        (self.wormDiskRadius+1,
                                        	self.wormDiskRadius+1))
     imcl = cv2.morphologyEx(np.uint8(image), cv2.MORPH_CLOSE, wormSE)
     imcl = np.equal(imcl, 1)
     # fix defects by filling holes
     imholes = ndimage.binary_fill_holes(imcl)
     imcl = np.logical_or(imholes, imcl)
     # fix barely touching regions
     # majority with worm pixels == 1 (median filter same?)
     imcl = nf.median_filter(imcl, footprint=[[1, 1, 1],
                                              [1, 0, 1],
                                              [1, 1, 1]])
     # diag with worm pixels == 0
     imcl = np.logical_not(bwdiagfill(np.logical_not(imcl)))
     # open with worm pixels == 1
     openSE = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
     imcl = cv2.morphologyEx(np.uint8(imcl), cv2.MORPH_OPEN, openSE)
     return np.equal(imcl, 1)
    def background_substraction(image):
        """
        Segments the garment from the background. This implementation treats colorful and dark
        objects as garment, and white or light-colored objects as background.
        :param image: Input image
        :return: Segmentation mask where white is garment and black is background
        """
        # Convert to HSV color space
        image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        # Threshold the saturation and value channels (using Otsu for threshold selection)
        blur_s = cv2.GaussianBlur(image_hsv[:, :, 1], (5, 5), 0)
        ret, mask_s = cv2.threshold(blur_s, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # cv2.imshow("----", mask_s)

        blur_v = cv2.GaussianBlur(image_hsv[:, :, 2],(5,5),0)
        ret, mask_v = cv2.threshold(blur_v, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        mask = cv2.bitwise_and(mask_s, mask_v)

        # Filter result using morphological operations (closing)
        kernel = np.ones((5,5),np.uint8)
        filtered_mask_close = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=5)
        filtered_mask_open = cv2.morphologyEx(filtered_mask_close, cv2.MORPH_OPEN, kernel, iterations=8)

        return filtered_mask_open
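
A hedged usage sketch for the function above; the input path and the masking post-step are illustrative assumptions:

import cv2

image = cv2.imread('garment.png')                        # hypothetical input
mask = background_substraction(image)                    # white = garment, black = background
garment_only = cv2.bitwise_and(image, image, mask=mask)
cv2.imshow('garment', garment_only)
cv2.waitKey(0)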
def GetPupil(gray,thr):
    tempResultImg = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR) #used to draw temporary results

    props = RegionProps()
    val,binI = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY_INV)
    
    #Apply closing, opening, and dilation to the thresholded image
    st7 = cv2.getStructuringElement(cv2.MORPH_CROSS,(7,7))
    st9 = cv2.getStructuringElement(cv2.MORPH_CROSS,(9,9))
    st15 = cv2.getStructuringElement(cv2.MORPH_CROSS,(15,15))
             
    binI = cv2.morphologyEx(binI, cv2.MORPH_CLOSE, st9) #Close 
    binI= cv2.morphologyEx(binI, cv2.MORPH_OPEN, st15) #Open
    binI = cv2.morphologyEx(binI, cv2.MORPH_DILATE, st7, iterations=2) #Dilate
    
    cv2.imshow("ThresholdPupil",binI)
    #Calculate blobs
    sliderVals = getSliderVals() #Getting slider values
    contours, hierarchy = cv2.findContours(binI, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) #Finding contours/candidates for pupil blob
    pupils = []
    pupilEllipses = []
    for cnt in contours:
        values = props.CalcContourProperties(cnt,['Area','Length','Centroid','Extend','ConvexHull']) #BUG - Add cnt.astype('int') in Windows
        if values['Area'] < sliderVals['maxSizePupil'] and values['Area'] > sliderVals['minSizePupil'] and values['Extend'] < 0.9:
            pupils.append(values)
            centroid = (int(values['Centroid'][0]),int(values['Centroid'][1]))
            cv2.circle(tempResultImg,centroid, 2, (0,0,255),4)
            pupilEllipses.append(cv2.fitEllipse(cnt))
    cv2.imshow("TempResults",tempResultImg)
    return pupilEllipses 
def processImage(pos):
    if(pos<1):
        return
    
    imgOriginal=cv2.imread('coins.png',0)
    retval, img=cv2.threshold(imgOriginal, 0, 255, cv2.THRESH_OTSU)
    
    #Close the entire image using a kernel defined by the user.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (pos,pos))
    closedImage=cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    
    #Open with a larger kernel to keep only the large coins, then obtain the
    #small coins by subtracting the large-coins-only image from the closed image
    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (pos+40,pos+40))
    largeCoinsImg = cv2.morphologyEx(closedImage, cv2.MORPH_OPEN, kernel)
    smallCoinsImg = closedImage - largeCoinsImg
    
    #Clean up the small-coins image: open to remove residue, then erode and dilate back
    smallCoinsImg = cv2.morphologyEx(smallCoinsImg, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (19, 19)))
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (35,35))
    smallCoinsImg = cv2.erode(smallCoinsImg, kernel)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (35,35))
    smallCoinsImg = cv2.dilate(smallCoinsImg, kernel)
    finalRes = colorImage(imgOriginal, smallCoinsImg, largeCoinsImg)
    
    #Show the trackbar and the image
    cv2.imshow("Final Result", finalRes)
    cv2.imwrite("Final Image.jpg", finalRes)
    def closing(self, size=(5, 5), binary_in=False):
        """
        morphological closing
        Parameters
        ----------
        size : 'tuple'
            kernel size
        binary_in : 'bool'
            if True, run on the binary image

        Returns
        -------
        self.img_b : 'array'
            new binary array
        self.img : 'array'
            new img array

        """
        kernel = np.ones(size, np.uint8)

        if binary_in:
            closed = cv2.morphologyEx(self.img_b, cv2.MORPH_CLOSE, kernel)
            self.img_b = closed
        else:
            closed = cv2.morphologyEx(self.img, cv2.MORPH_CLOSE, kernel)
            self.img = closed
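
Since closing is dilation followed by erosion, it fills small dark gaps without growing the foreground much. Hypothetical usage, assuming an instance proc that carries the img and img_b arrays the docstring describes:

proc.closing(size=(7, 7), binary_in=True)   # fill small holes in the binary mask
proc.closing()                              # default 5x5 kernel on the image array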
Example no. 8
def get_library():
    # READ THE LIBRARY IMAGES
    img_cube = cv2.imread('./Library/Cube.jpg')
    img_hexagon = cv2.imread('./Library/Hexagon.jpg')
    img_star = cv2.imread('./Library/Star.jpg')

    # CANNY EDGE ALGORITHM
    img_cube = cv2.Canny(img_cube, 1200, 100)
    img_hexagon = cv2.Canny(img_hexagon, 1200, 100)
    img_star = cv2.Canny(img_star, 1200, 100)

    # MORPHOLOGICAL GRADIENT (DIFFERENCE OF DILATION AND EROSION)
    kernel = np.ones((5, 5), np.uint8)
    img_cube = cv2.morphologyEx(img_cube, cv2.MORPH_GRADIENT, kernel)
    img_hexagon = cv2.morphologyEx(img_hexagon, cv2.MORPH_GRADIENT, kernel)
    img_star = cv2.morphologyEx(img_star, cv2.MORPH_GRADIENT, kernel)

    # THRESHOLD (INVERSE BINARY)
    ret, img_cube = cv2.threshold(img_cube, 0, 255, cv2.THRESH_BINARY_INV)
    ret, img_hexagon = cv2.threshold(
        img_hexagon, 0, 255, cv2.THRESH_BINARY_INV)
    ret, img_star = cv2.threshold(img_star, 0, 255, cv2.THRESH_BINARY_INV)

    # SURF - FIND KEYPOINTS AND DESCRIPTORS
    surf = cv2.SURF()
    (cube_kpts, cube_dpts) = surf.detectAndCompute(img_cube, None)
    (hexagon_kpts, hexagon_dpts) = surf.detectAndCompute(img_hexagon, None)
    (star_kpts, star_dpts) = surf.detectAndCompute(img_star, None)

    # LIBRARY IMAGE DICTIONARY - IMAGE:DESCRIPTORS
    library = {CUBE: cube_dpts, HEXAGON: hexagon_dpts, STAR: star_dpts}
    return library
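
The descriptor library returned above is presumably matched against query frames elsewhere. A sketch of one common approach, a brute-force matcher with Lowe's ratio test; query_dpts and the 0.75 ratio are assumptions, not taken from the original code:

import cv2

def best_library_match(query_dpts, library, ratio=0.75):
    # SURF produces float descriptors, so an L2 brute-force matcher fits
    bf = cv2.BFMatcher(cv2.NORM_L2)
    best_name, best_count = None, 0
    for name, dpts in library.items():
        matches = bf.knnMatch(query_dpts, dpts, k=2)
        # keep only matches clearly better than their runner-up
        good = [m[0] for m in matches
                if len(m) == 2 and m[0].distance < ratio * m[1].distance]
        if len(good) > best_count:
            best_name, best_count = name, len(good)
    return best_name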
Example no. 9
def segment(r, c, finger_print, fp_copy):
# Image segmentation based on variance and mathematical morphology

    W = 16
    threshold = 1600
    A = np.zeros((r,c), np.uint8)

    for i in np.arange(0,r-1,W):
        for j in np.arange(0,c-1,W):
            # mean and variance of the current WxW window
            Mw = (1/(W*W)) * np.sum(finger_print[i:i+W,j:j+W])
            Vw = (1/(W*W)) * np.sum((finger_print[i:i+W,j:j+W] - Mw)**2)

            if Vw < threshold:
                finger_print[i:i+W,j:j+W] = 0
                A[i:i+W,j:j+W] = 0
            else:
                A[i:i+W,j:j+W] = 1

    kernel = np.ones((44,44),np.uint8)
    closing_mask = cv2.morphologyEx(A, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((88,88),np.uint8)
    opening_mask = cv2.morphologyEx(closing_mask, cv2.MORPH_OPEN, kernel)

    for i in np.arange(0,r-1,W):
        for j in np.arange(0,c-1,W):
            if np.sum(finger_print[i:i+W,j:j+W]) != np.sum(opening_mask[i:i+W,j:j+W]):
                if np.mean(opening_mask[i:i+W,j:j+W]) == 1:
                    finger_print[i:i+W,j:j+W] = fp_copy[i:i+W,j:j+W]
                elif  np.mean(opening_mask[i:i+W,j:j+W]) == 0:
                    finger_print[i:i+W,j:j+W] = 0

    return finger_print, opening_mask
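
The per-window mean/variance loop in segment() can also be written with NumPy block reshaping, which is both faster and clearer; a sketch, assuming the image is cropped to multiples of W:

import numpy as np

def block_variance(img, W=16):
    rr = img.shape[0] - img.shape[0] % W   # crop rows to a multiple of W
    cc = img.shape[1] - img.shape[1] % W   # crop cols to a multiple of W
    blocks = img[:rr, :cc].reshape(rr // W, W, cc // W, W)
    # variance of each WxW block, the same quantity as Vw in segment()
    return blocks.var(axis=(1, 3))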
    def apply_skin_mask(self, frame):
        # convert from BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # detect skin using the hand histogram
        self.skin_mask = cv2.calcBackProject([hsv], [0, 1], self.hand_histogram, [0, 180, 0, 256], 1)
        # create an elliptical kernel (11 works best in my case)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        cv2.filter2D(self.skin_mask, -1, kernel, self.skin_mask)
        # Apply a Gaussian filter on the result to give a better result
        cv2.GaussianBlur(self.skin_mask, (3, 3), 0, self.skin_mask)
        # change the threshold to suit the brightness (20-30 gave me best results so far)
        _, thresh = cv2.threshold(self.skin_mask, 20, 255, 0)
        # Apply a Gaussian filter on the result to give a better result
        cv2.GaussianBlur(self.skin_mask, (5, 5), 0, self.skin_mask)

        # Tried other threshold types (none of them worked)
        # _, thresh = cv2.adaptiveThreshold(skin_mask, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 0, 11, 2)
        # _, thresh = cv2.threshold(skin_mask, 0, 255, 0+cv2.THRESH_OTSU)  # Otsu thresholding doesn't give satisfying results

        # Multichannel
        thresh = cv2.merge((thresh, thresh, thresh))
        # Mask the hand from the original frame
        self.skin_mask = cv2.bitwise_and(frame, thresh)
        # remove faulty skin patches (opening with a 31x31 kernel)
        self.skin_mask = cv2.morphologyEx(self.skin_mask, cv2.MORPH_OPEN, np.ones((31, 31), np.uint8), iterations=5)
        # reduce black holes in the hand (closing with a 9x9 kernel)
        cv2.morphologyEx(self.skin_mask, cv2.MORPH_CLOSE, np.ones((9, 9), np.uint8), self.skin_mask, iterations=5)
        # Draw Skin masking (JFD)
        if self.DEBUGGING:
            cv2.imshow('skin mask', self.skin_mask)
        return self.skin_mask
Example no. 11
def get_orange_box_points(cam2):
    _,im = cam2.read()

    ###############SETTINGS FOR GREEN BOXES################
    lower_blue = np.array([40,125,0], dtype=np.uint8)
    upper_blue = np.array([125,205,110], dtype=np.uint8) #night
    #upper_blue = np.array([150,240,115], dtype=np.uint8) #day

    #masking and morphological transformations to find green boxes
    mask = cv2.inRange(im, lower_blue, upper_blue)
    kernel = np.ones((2,2),np.uint8)
    morph = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations = 2)
    kernel = np.ones((3,3),np.uint8)
    morph2 = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel,iterations = 3)
    kernel = np.ones((4,4),np.uint8)
    dilation = cv2.dilate(morph2,kernel,iterations = 1)
    invmask = 255 - dilation

    #blob detecting for green boxes
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = 300
    params.maxArea = 1700
    detector = cv2.SimpleBlobDetector_create(params)

    #get keypoints and store as nparray
    keypoints = detector.detect(invmask)
    points = []
    for kp in keypoints:
        points.append(np.array([kp.pt[0],kp.pt[1]]))

    im = cv2.drawKeypoints(im, keypoints, np.array([]), (0,255,0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    return (points , im)
    def find_roi_normal(self):
        # self.mask = cv2.cvtColor(self.mask, cv2.CV_32SC1)
        hsv = cv2.cvtColor(self.rgb_image, cv2.COLOR_BGR2HSV)
        # [20, 20, 20]
        lower_red = np.array([30, 30, 30])
        # [255, 255, 255]
        upper_red = np.array([200, 200, 200])
        mask = cv2.inRange(hsv, lower_red, upper_red)
        res = cv2.bitwise_and(self.rgb_image, self.rgb_image, mask=mask)

        # (50, 50)
        close_kernel = np.ones((50, 50), dtype=np.uint8)
        close_kernel_tmp = np.ones((30, 30), dtype=np.uint8)
        image_close = Image.fromarray(cv2.morphologyEx(np.array(mask), cv2.MORPH_CLOSE, close_kernel))
        image_close_tmp = Image.fromarray(cv2.morphologyEx(np.array(mask), cv2.MORPH_CLOSE, close_kernel_tmp))
        # (30, 30)
        open_kernel = np.ones((30, 30), dtype=np.uint8)
        open_kernel_tmp = np.ones((30, 30), dtype=np.uint8)
        image_open = Image.fromarray(cv2.morphologyEx(np.array(image_close), cv2.MORPH_OPEN, open_kernel))
        image_open_tmp = Image.fromarray(cv2.morphologyEx(np.array(image_close_tmp), cv2.MORPH_OPEN, open_kernel_tmp))
        contour_rgb, bounding_boxes, contour_rgb_tmp = self.get_normal_image_contours(np.array(image_open),
                                                                                      self.rgb_image,
                                                                                      np.array(image_open_tmp))
        # self.draw_bbox(bounding_boxes)

        self.display(contour_rgb, contour_rgb_tmp)
Example no. 13
    def threshold_gradient_strength(self, gradient_mag):
        """ thresholds the gradient strength such that features are emphasized
        """
        lo, hi = gradient_mag.min(), gradient_mag.max()
        threshold = lo + self.params['gradient/threshold']*(hi - lo)
        bw = (gradient_mag > threshold).astype(np.uint8)
        
        for _ in xrange(2):
            bw = cv2.pyrDown(bw)

        # do morphological opening to remove noise
        w = 2#0
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (w, w))
        bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, kernel)
    
        # do morphological closing to locate objects
        w = 2#0
        bw = cv2.copyMakeBorder(bw, w, w, w, w, cv2.BORDER_CONSTANT, value=0)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*w + 1, 2*w + 1))
        bw = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
        bw = bw[w:-w, w:-w].copy()

        for _ in xrange(2):
            bw = cv2.pyrUp(bw)
        
        return bw
Example no. 14
def processCard(image_o,scale):
    #Scale the image down so the later functions work better, then convert to greyscale
    image = cv2.resize(image_o, (int(image_o.shape[1]/scale), int(image_o.shape[0]/scale)))

    #Processing image to improve reliability of finding corners
    image = cv2.bilateralFilter(image, 5, 150, 50)
    imgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)

    kernel = np.ones((5,5),np.uint8)

    imgray = cv2.morphologyEx(imgray,cv2.MORPH_OPEN,kernel)
    imgray = cv2.morphologyEx(imgray,cv2.MORPH_CLOSE,kernel)

    imgray = cv2.Canny(imgray,40,50)
    """ Ploting of image before and after processing
    plt.subplot(121)
    plt.imshow(cv2.cvtColor(image_o, cv2.COLOR_BGR2RGB))
    plt.title("Original")
    plt.axis("off")
    plt.subplot(122)
    plt.axis("off")
    plt.title("After Canny Edge")
    plt.imshow(imgray)
    plt.gray()
    plt.show()
    """

    return imgray
Example no. 15
def cleanNoise( img, smallKernal, bigKernal):     
    ###### These steps should clean up some of the smaller noise in the image
    se1 = cv2.getStructuringElement(cv2.MORPH_RECT, smallKernal ) #### create kernel for the morphological operations
    se2 = cv2.getStructuringElement(cv2.MORPH_RECT, bigKernal ) 
    mask = cv2.morphologyEx(img, cv2.MORPH_CLOSE, se2) #### close = dilation followed by erosion 
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se1) #### open = erosion followed by dilation 
    return( mask )
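
Illustrative call; the mask path and the (3, 3) / (15, 15) kernel sizes are arbitrary assumptions:

import cv2

binary_img = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # hypothetical binary mask
cleaned = cleanNoise(binary_img, smallKernal=(3, 3), bigKernal=(15, 15))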
Example no. 16
    def __init__(self, cv2, np, image, color, filters):


        if filters is not None:
            for filter in filters:
                print "Hello"
                self.image = runFilterScript(filter, image)

        
        # The captured frame is in BGR (blue, green, red)
        # It needs to be converted to HSV (hue, saturation, value) for OpenCV to perform color detection
        hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        # Return the image with just the detected color
        detected = detectColor(hsv_img, color)

        str_el = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
        detected = cv2.morphologyEx(detected, cv2.MORPH_OPEN, str_el)
        detected = cv2.morphologyEx(detected, cv2.MORPH_CLOSE, str_el)

        # Find the contours
        contours = findContours(detected)

        rects = []

        for contour in contours:

            rect = cv2.minAreaRect(contour)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
            rects.append(box)
            
        self.rects = rects
        self.cv2 = cv2
        self.detected = detected
def label_image(image):
    
    # Crop the region of interest (rows 50:520, cols 240:640) in one slicing step
    ROI = image[50:520, 240:640, :3].astype(np.uint8)

    
    gray_ROI = cv2.cvtColor(ROI,cv2.COLOR_BGR2GRAY)
    
    ROI_flou = cv2.medianBlur((ROI).astype('uint8'),3)
    
    Laser = Detecte_laser.Detect_laser(ROI_flou)
    
    open_laser = cv2.morphologyEx(Laser, cv2.MORPH_DILATE, disk(3))
    
    skel = skeletonize(open_laser > 0)
    
    tranche = Detecte_laser.tranche(skel,90,30)    
    
    ret, thresh = cv2.threshold(gray_ROI*tranche.astype('uint8'),0,1,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    thresh01 = thresh<1.0
    
    open_thresh = cv2.morphologyEx(thresh01.astype('uint8'), cv2.MORPH_OPEN, disk(10))
    
    labelised = (label(open_thresh,8,0))+1
    
    return gray_ROI,labelised
def CenterOfMass(image):
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    img2 = image[:, :, ::-1].copy()

    lowera = np.array([160, 100, 0])
    uppera = np.array([180, 250, 255])
    lowerb = np.array([0, 100, 0])  # It was 100 instead of 195
    upperb = np.array([5, 250, 255])

    mask1 = cv2.inRange(hsv, lowera, uppera)
    mask2 = cv2.inRange(hsv, lowerb, upperb)

    mask = cv2.add(mask1, mask2)

    kernel = np.ones((5, 5),np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

    mj=sum(mask)
    mi=sum(np.transpose(mask))
    A=mask.shape
    ni=np.array(range(A[0]))
    nj=np.array(range(A[1]))
    M=sum(sum(mask))
    if sum(mi)==0 or sum(mj)==0:
        print "no ball"
        xcm=0
        ycm=0
    else:
        xcm=np.dot(mj,nj)/sum(mj)
        ycm=np.dot(mi,ni)/sum(mi)

    CM=[ycm,xcm]

    return CM
Example no. 19
def loadF():
    currentFrame = cv2.imread(imglist[i])
    resizimg1 = cv2.resize(
            currentFrame,(0,0),fx=0.3,fy=0.3)# (0.3 works well)
    #workFrame  = resizimg1[80:-20,1:-10]
    workFrame  = resizimg1[100:-20,1:-10]
    #workFrame = cv2.copyMakeBorder(workFrame1,1,1,1,1,cv2.BORDER_CONSTANT,value=WHITE)
    workFramecp = workFrame.copy()
    workFrameGray = cv2.cvtColor(workFrame,cv2.COLOR_BGR2GRAY)
    # ---------------------------------------------------------
    currentMask1 = cv2.imread(maskslist[i])
    #currentMask2 = currentMask1[80:-20,1:-10]
    currentMask2 = currentMask1[100:-20,1:-10]
    # Slight opening to clean noise
    opened = cv2.morphologyEx(currentMask2, cv2.MORPH_OPEN, kernelO1)
    # find and draw external contours
    currentMaskOp1 = cv2.cvtColor(opened,cv2.COLOR_BGR2GRAY)
    # Perform a small closing
    currentMaskOp = cv2.morphologyEx(currentMaskOp1, cv2.MORPH_CLOSE, kernel)
    currentMask = cv2.convertScaleAbs(currentMaskOp)
    contoursE,hye = cv2.findContours(currentMask.copy(),
                                     cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    #print hye
    # cv2.CHAIN_APPROX_NONE
    return currentMask,contoursE,currentMask2,workFrame,workFramecp
Example no. 20
def removeNoise(img):
	kernel = np.ones((3, 3), np.uint8)
	
	dst = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
	dst = cv2.morphologyEx(dst, cv2.MORPH_CLOSE, kernel)
	
	return dst
def clean_thresholded_image(im):
    # Now that we have a thresholded image, we need to manipulate it a bit to
    # remove various irregularities in the image (pockets, weird edges, etc.)
    # This is done using Morphology operations. Currently the kernels are hand-
    # tuned. There may be some better way to do this in the future. This section
    # in the code is by far the most brittle, and has the biggest effect on the
    # quality of the results.
    skernel = np.ones((2,2),np.uint8) #small square kernel used for erosion
    # First erode the image a bit
    #erosion = cv2.erode(thresh, skernel,iterations = 1) #refines all edges in the binary image
    # Do a few iterations of opening and closing to smooth things out
    # There must be a better way to do this than how I'm doing it now, but this
    # seemed to be a quick way to get it to work well enough for my purposes
    opening = cv2.morphologyEx(im, cv2.MORPH_OPEN, skernel,iterations=3)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, skernel,iterations=3)
    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, skernel,iterations=3)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, skernel,iterations=3)
    # You can also try bigger kernels, but I had more success with the iterative
    # smaller kernels above.
    # bkernel = np.ones((10,10),np.uint8) #big square kernel used for erosion
    # opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, bkernel)
    # closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, bkernel)
    #cv2.imshow('thresh',thresh)
    #iplot(thresh)
    return closing
Example no. 22
def squares_from_corner_image(centers):
  centers = cv2.morphologyEx(centers,cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20)),iterations = 1)
  centers = cv2.morphologyEx(centers,cv2.MORPH_ERODE, cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20)),iterations = 1)
  contour, hier = cv2.findContours(centers.copy(), cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
  
  centroids = []
  for cnt in contour:
    mom = cv2.moments(cnt)
    (x,y) = int(mom['m10']/mom['m00']), int(mom['m01']/mom['m00'])
    #cv2.circle(img,(x,y),4,(0,255,0),-1)
    centroids.append((x,y))

  #for i, (x, y) in enumerate(centroids):
  #  cv2.putText(img, str(i), (x, y + 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,0,255))
  
  dtype = [('x', np.int32), ('y', np.int32)]
  centroids = np.array(centroids, dtype=dtype)
  centroids.sort(order="x")
  centroids = centroids.reshape((9, 9, ))
  for row in centroids:
    row.sort(order="y")
  
  squares = []
  for i, row in enumerate(centroids[:-1]):
    for j, _ in enumerate(row[:-1]):
      square = []
      for x, y in [(i, j), (i + 1, j), (i + 1, j + 1), (i, j + 1)]:
        point = centroids[x][y].tolist() #ugly hack to strip type information
        square.append(point)
      squares.append(np.array(square))
  return squares
Example no. 23
def get_red_areas_contours(image):
    """Find contours corresponding to red/orange cone pixels"""

    # binarize RGB image by "reddish" (orange + red) pixels
    b,g,r = cv2.split(image)
    b = b.astype(float)
    g = g.astype(float)
    r = r.astype(float)
    mat = b + g + r
    mat[mat == 0] = 1.0  # avoid division by 0
    r[r < 20] = 0        # ignore 'dark red'
    reddish = r/mat*255  # 'normalize' red, i.e. red compared to other color planes
    reddish = reddish.astype(np.uint8)
    __, binaryImg = cv2.threshold(reddish, 110, 255, cv2.THRESH_BINARY)
    binaryImg = cv2.morphologyEx(binaryImg, cv2.MORPH_OPEN, KERNEL)
    binaryImg = cv2.morphologyEx(binaryImg, cv2.MORPH_CLOSE, KERNEL)

    __, contours, hierarchy= cv2.findContours(binaryImg, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    lMarks = []
    bRects = []
    areas = []
    for cnt in contours:
        area = cv2.contourArea(cnt)
        x,y,w,h = cv2.boundingRect(cnt)
        whRatio = w/float(h)
        arRatio = area/float(w*h)
        if area > 20 and arRatio > 0.5 and 0.33 < whRatio < 3:
            lMarks.append(cnt)
            bRects.append([x,y,w,h])
            areas.append(area)
        
    return lMarks, bRects, areas
Example no. 24
def ProcessImage(image, binary_threshold, close_size, open_size):
    _, img_thresh = cv2.threshold(image, binary_threshold, 250, cv2.THRESH_BINARY)    #Perform binary threshold to get black and white
    cv2.imshow('GRAY', image)                                            #Show Grayscale
    cv2.imshow('THRESHOLD', img_thresh)                                  #Show binary threshold image
    
    # MORPHING
    # First close the image to get rid of black dots within larger shapes
    circ_mask = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(close_size,close_size))  
    img_morph = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, circ_mask)     
    cv2.imshow('CLOSED', img_morph)    
    
    # Then open to get rid of white elements that are too small to be significant
    circ_mask = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(open_size,open_size))
    img_morph = cv2.morphologyEx(img_morph, cv2.MORPH_OPEN, circ_mask)    
    cv2.imshow('OPENED', img_morph)
       
    #CONTOURS
    img_contours = np.zeros((image.shape[0], image.shape[1],3), np.uint8)     #Create empty image
    #Get contours, with hierarchy so I don't lose nested (child) contours
    contours, hierarchy = cv2.findContours(img_morph, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) #Get contours
    #Add each contour with a random color 
    for contour_idx in range(len(contours)):
        #if not a child contour, draw with random color
        if hierarchy[0][contour_idx,3] == -1:
            cv2.drawContours(img_contours, contours, contour_idx, np.random.randint(0,255,3), -1)
        #if a child contour, just draw as background color
        else:
            cv2.drawContours(img_contours, contours, contour_idx, BACKGROUND_COLOR, -1)
    #Show contoured/colored output
    cv2.imshow('CONTOURS', img_contours)                                    
    cv2.waitKey(0)
    
    return
Example no. 25
def _extract_interesting_region(data):
    """Preprocesses image for frame finding. Steps taken are
        * Otsu thresholding
        * small-kernel-area opening to get rid of single/isolated bright pixels
          which are assumed to be noise
        * large-kernel-area closing to reverse the effect of the prior opening;
          this also serves the purpose of connecting close bright areas (for the
          next step). Due to the distinct elongation of the kernel in the x-
          direction this especially favors horizontal structures.
        * finding the largest connected region

    :param np.ndarray data: Image as 2D array of type uint8
    :returns: Mask as 2D array labeling the "interesting area" in that picture

    """
    assert data.dtype == np.uint8, "Image has wrong dtype."
    _, buf = cv.threshold(data, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

    # Opening to get rid of noise
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
    buf = cv.morphologyEx(buf, cv.MORPH_OPEN, kernel, iterations=3)

    # Closing to get connected area where signal should be
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (31, 7))
    buf = cv.morphologyEx(buf, cv.MORPH_CLOSE, kernel, iterations=5)

    # Find the largest connected component
    cc = label(buf, neighbors=4, background=0) + 1
    largest_component = np.argmax(np.bincount(cc.ravel())[1:]) + 1
    return cc == largest_component
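
The label(..., neighbors=4) signature used above comes from an old scikit-image release and has since been removed; an equivalent largest-component step using OpenCV's own connected components, as a sketch:

import cv2
import numpy as np

def largest_component_mask(buf):
    # connectivity=4 mirrors the neighbors=4 call above
    n, labels, stats, _ = cv2.connectedComponentsWithStats(buf, connectivity=4)
    if n <= 1:                                  # only background present
        return np.zeros(buf.shape, dtype=bool)
    # row 0 of stats is the background; pick the largest foreground area
    largest = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
    return labels == largest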
Example no. 26
    def __find_conveyor(self, initialization_frame):
        """Set x, y, w, and h to fit the conveyor size

        :param initialization_frame: Initialization frame from which the conveyor position is calibrated
        :return: Bounding rectangle (x, y, w, h) of the conveyor, or (0, 0, 0, 0) if it was not found
        """

        kernel_size = int(self.__image_width / 40)
        kernel = np.ones((kernel_size, kernel_size), np.uint8)

        threshold = cv2.inRange(initialization_frame, self.CONVEYOR_COLOR_MIN, self.CONVEYOR_COLOR_MAX)
        _, threshold = cv2.threshold(threshold, 200, 255, cv2.THRESH_BINARY)

        self.__show('Conveyor image', threshold)

        threshold = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=3)
        threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, kernel, iterations=3)

        _, contours, _ = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        biggest_contour = self.__get_biggest_contour(contours)

        if biggest_contour is None:
            self.__log('Conveyor not found')
            return 0, 0, 0, 0
        else:
            self.__log('Conveyor found at: ' + str(cv2.boundingRect(biggest_contour)))
            return cv2.boundingRect(biggest_contour)
def line_detector_drawn(image, show_plots = False):
    # resize it to a smaller factor so that
    # the shapes can be approximated better
    resized = imutils.resize(image, width=int(np.ceil(image.shape[1]/2)))
    ratio = image.shape[0] / float(resized.shape[0])

    # convert the resized image to grayscale, blur it slightly,
    # and threshold it
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (45, 45), 0)

    # thresh = cv2.threshold(blurred, 150, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY, 19, 2)
    kernel1 = np.ones((7, 7), np.uint8)
    thresh = 255 - thresh

    thresh = cv2.dilate(thresh, kernel1, iterations=1)
    kernel2 = np.ones((7, 7), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel2)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel2)
    thresh = closing
    # if show_plots:
    #     cv2.imshow("Thresh", thresh)
    #     cv2.waitKey(0)

    return remove_blobs(image, resized, thresh, ratio, show_plots), ratio
Example no. 28
def detect_harris_squares(img):
  gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
  gray = cv2.GaussianBlur(gray,(3,3),0)
  gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 3, 2) 

  gray = cv2.morphologyEx(gray,cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4)),iterations = 1)
  gray = cv2.morphologyEx(gray,cv2.MORPH_ERODE, cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4)),iterations = 1)
  mask, x, y, width, height = get_board_mask(gray)
  roi = gray[y: y+ height, x: x + width]


  #result is dilated for marking the corners, not important
  
  # Threshold for an optimal value, it may vary depending on the image.

  dst = roi.copy()


  
  rst = cv2.cornerHarris(dst, 5, 1, 0.04)
  #dst = cv2.dilate(dst, None)
  width, height = rst.shape

  dst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
  dst_max = rst.max()*0.5
  for y in xrange(0, height):
    for x in xrange(0, width):
      harris = rst[x][y]
      # check the corner detector response
      if harris > dst_max:
        # draw a small circle on the original image
        cv2.circle(dst, (x,y), 2, (255, 0, 25))
Example no. 29
    def segmentate(self):
        self.reset()
        self.scale(2.0)
        self._img_orig = img_orig = self.img.copy()
        skew = self.skew(230, 255)
        if skew is None:
            print('Retry')
            skew = self.skew(20, 100)

        self.reset()
        #self.scale(2.0, cv2.INTER_NEAREST)
        self.scale(2.0, cv2.INTER_CUBIC)
        self.rotate(skew)
#        _, lev = self.levels(106, 122, 8.58)
#        th = self.hsv_threshold(lev)

        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3))
#        closed = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel)

        th = self.threshold(self.gray(self.hsv_levels(0, 172, 0.21, level=2)))
        closed = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)

        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 8))
        closed = cv2.morphologyEx(closed, cv2.MORPH_CLOSE, kernel)
        #cv2.imshow('closed', closed)
        #self.scale(2.0, cv2.INTER_NEAREST)
        #self.scale(0.5, cv2.INTER_CUBIC)
        self._gray = gray = self.hsv_threshold()

        self._mask_and = mask_and = cv2.bitwise_and(255-gray, 255-gray, mask=closed)
        #cv2.imshow('255-mask_and', 255-mask_and)

        img_scale = cv2.bitwise_and(self.img, self.img, mask=mask_and) #closed)

        self._cnts, self._img_dbg = Ojooo.detect_contours(img_scale, mask_and)
Example no. 30
	def morphFrame(self,frame):
		kernel = np.ones((20, 20), "uint8")
		opening = cv2.morphologyEx(frame, cv2.MORPH_OPEN, kernel)

		kernel = np.ones((8, 8), "uint8")
		# close the opened frame (the opening result was previously computed but never used)
		frame = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
		return frame
def sobelx(image):
    sx = cv.Sobel(image, cv.CV_16S, 1, 0)
    absx = cv.convertScaleAbs(sx)
    absx = cv.morphologyEx(absx, cv.MORPH_CLOSE, rect_kernel)
    thresh = cv.threshold(absx, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)[1]
    return thresh
Example no. 32
    def run(self):
        if not self.tello.connect():
            print("Tello not connected")
            return
        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return

        self.tello.streamoff()
        self.tello.streamon()
        cap = self.tello.get_frame_read()

        should_stop = False
        # print('loop started')

        while not should_stop:
            frame = cap.frame  # read one frame from the camera
            for event in pygame.event.get():
                if event.type == USEREVENT + 1:
                    self.update()
                elif event.type == QUIT:
                    should_stop = True
                elif event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        should_stop = True
                    else:
                        self.keydown(event.key)
                elif event.type == KEYUP:
                    self.keyup(event.key)

            self.screen.fill([0, 0, 0])

            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            green_mask = cv2.inRange(
                hsv, greenLower, greenUpper
            )  # pixels below greenLower or above greenUpper become 0; values in between become 255
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
            green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_OPEN,
                                          kernel)  # opening
            green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_CLOSE,
                                          kernel)  # closing
            cnts, hie = cv2.findContours(green_mask, cv2.RETR_TREE,
                                         cv2.CHAIN_APPROX_SIMPLE)

            if len(cnts) > 3:
                cnts.sort(key=cv2.contourArea, reverse=True)
                x1, y1, w1, h1 = cv2.boundingRect(cnts[0])
                x2, y2, w2, h2 = cv2.boundingRect(cnts[1])
                x3, y3, w3, h3 = cv2.boundingRect(cnts[2])
                x4, y4, w4, h4 = cv2.boundingRect(cnts[3])

                center1 = (int(x1 + w1 / 2), int(y1 + h1 / 2))
                center2 = (int(x2 + w2 / 2), int(y2 + h2 / 2))
                center3 = (int(x3 + w3 / 2), int(y3 + h3 / 2))
                center4 = (int(x4 + w4 / 2), int(y4 + h4 / 2))

                center = (
                    int((center1[0] + center2[0] + center3[0] + center4[0]) /
                        4),
                    int((center1[1] + center2[1] + center3[1] + center4[1]) /
                        4))
                w = max(abs(center1[0] - center2[0]),
                        abs(center1[0] - center3[0]),
                        abs(center1[0] - center4[0]))
                h = max(abs(center1[1] - center2[1]),
                        abs(center1[1] - center3[1]),
                        abs(center1[1] - center4[1]))
                # delta = min(abs(center2[1]-center1[1]), abs(center3[1]-center1[1]), abs(center4[1]-center1[1]))
                temp_a = center2[1] - center1[1]
                temp_b = center3[1] - center1[1]
                temp_c = center4[1] - center1[1]
                delta = min(abs(temp_a), abs(temp_b), abs(temp_c))
                flag = bool(temp_a * temp_b * temp_c > 0)  # drifting left

                percent = (w * h / 691200.) * 100.0
                cnt = torch.Tensor(center)
                actions = self.actor(cnt)
                actions = actions.cpu().data.numpy()
                # actions = speedControlLinear(center)

                done = bool(
                    get_dist(center[0], center[1]) < 60 and
                    (percent > 40)) and (delta < 10)
                # done = bool(get_dist(center[0], center[1]) < 60 and (percent > 40))
                if not done:
                    self.yaw_velocity = -int(actions[0])  # the reinforcement-learning output needs the sign flipped
                    self.up_down_velocity = int(actions[1])
                    if percent < 40:
                        self.for_back_velocity = 15
                        if delta > 10:
                            # self.left_right_velocity = 10
                            # self.yaw_velocity = -10
                            if flag:
                                self.left_right_velocity = 10
                                self.yaw_velocity = -10
                            else:
                                self.left_right_velocity = -10
                                self.yaw_velocity = 10
                    cv2.circle(frame, center, 5, (255, 0, 0), -1)
                else:
                    self.yaw_velocity = 0
                    self.up_down_velocity = 0
                    self.for_back_velocity = 0
            else:
                self.for_back_velocity = -15
                # self.yaw_velocity = -self.yaw_velocity
                # self.up_down_velocity = -self.up_down_velocity

            frame = cv2.circle(frame, (480, 360), 5, (0, 0, 255), -1)
            cv2.imshow("xx", green_mask)
            k = cv2.waitKey(1)
            if k == 27:
                break

            frame = np.rot90(frame)
            frame = np.flipud(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = pygame.surfarray.make_surface(frame)
            self.screen.blit(frame, (0, 0))
            pygame.display.update()

            # time.sleep(0.1)

        # Call it always before finishing. I deallocate resources.
        self.tello.end()
Example no. 33
def contrast(frame):
	structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
	imgTopHat   = cv2.morphologyEx(frame, cv2.MORPH_TOPHAT, structuringElement)
	imgBlackHat = cv2.morphologyEx(frame, cv2.MORPH_BLACKHAT, structuringElement)
	return cv2.subtract(cv2.add(frame, imgTopHat), imgBlackHat)
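
Since top-hat is src - open(src) and black-hat is close(src) - src, the function above computes src + (src - open(src)) - (close(src) - src): small bright details are boosted and small dark ones suppressed. Illustrative usage (the file name is hypothetical):

import cv2

gray = cv2.imread('plate.jpg', cv2.IMREAD_GRAYSCALE)
enhanced = contrast(gray)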
Example no. 34
def get_img(mode):
    # This was intended to inhibit the stream warnings to the console, but it did not work:
    #text_trap = io.StringIO()
    #sys.stderr = text_trap
    if (mode == IMG_RGB):
        # gets one frame from the RGB camera
        frame = freenect.sync_get_video()[0]

        #frame = clahe.apply(frame)

        #background subtraction
        #Learning Rate Parameter / -1 auto, 0 not updated at all, 1 new from last frame
        fgMask = backSub.apply(frame, learningRate=-1)

        #threshold to get rid of any color other than black and white
        #anything lighter than 127 will be set to 255 (white), anything lower to 0 (black)
        # we don't need this if we turn shadows off
        #threshold expects a single-channel image // with shadows enabled it is a greyscale image
        #ret, fgMask = cv2.threshold(fgMask,127,255,cv2.THRESH_BINARY)

        #erosion
        #fgMask = cv2.erode(fgMask, kernel, iterations = 1)

        #dilation

        #fgMask = cv2.dilate(fgMask, kernel, iterations = 1)

        fgMask = cv2.morphologyEx(
            fgMask, cv2.MORPH_CLOSE,
            kernel_big)  # closes gaps smaller than 9x9 pixels

        #change color space from grayscale to BGR so we can draw a colored box later around blobs
        #col = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)

    elif (mode == IMG_DEPTH):
        frame = freenect.sync_get_depth()[0]  # gets the Kinect depth image
        frame = 255 * np.logical_and(frame >= DEPTH - THRESHOLD,
                                     frame <= DEPTH + THRESHOLD)

        # we make sure it's an 8-bit single-channel image // do we need this
        #frame = frame.astype(np.uint8)

        #background subtraction // do we need this? // we do this already with the threshold
        fgMask = backSub.apply(frame, learningRate=-1)

        #do we need this // there are no grey colors in the depth image after the threshold
        #ret, fgMask = cv2.threshold(fgMask,127,255,cv2.THRESH_BINARY)

        #erosion
        #fgMask = cv2.erode(fgMask, kernel, iterations = 1) # morphological erode with 3x3

        #dilation

        #fgMask = cv2.dilate(fgMask, kernel, iterations=1)

        # do we need this
        fgMask = cv2.morphologyEx(
            fgMask, cv2.MORPH_CLOSE,
            kernel_big)  # closes gaps smaller than 9x9 pixels

        #change color space from grayscale to BGR so we can draw a colored box later around blobs
        #col = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)

    # Problem: this function gives us sometimes only one blob instead of two
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(fgMask)

    # Reset output to stdout:
    #sys.stderr = sys.__stderr__
    return ret, frame, fgMask, labels, stats, centroids
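
A sketch of drawing boxes around the detected blobs from the stats returned above, as the commented-out code hints at; the 500-pixel minimum area is an arbitrary assumption:

import cv2

ret, frame, fgMask, labels, stats, centroids = get_img(IMG_DEPTH)
vis = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)
for i in range(1, ret):                  # label 0 is the background
    x, y, w, h, area = stats[i]
    if area > 500:                       # skip tiny blobs; threshold is a guess
        cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 2)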
Example no. 35
        allowPassage = False

    # RE-Initialize
    frameInfo = np.zeros((400, 500, 3), np.uint8)
    averageArea = averageSize()
    ret, frame = cap.read()  # read a frame
    frameForView = frame.copy()

    # Clean Frame
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    blur = cv2.medianBlur(fgmask, 5)
    thresh = cv2.threshold(
        blur, 127, 255, cv2.THRESH_BINARY)[1]  # shadow of MOG2 is grey = 127
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE,
                               kernel)  # fill any small holes
    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)  # remove noise
    contours = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[1]

    mask_opening = cv2.inRange(opening, np.array([0]), np.array([128]))
    noBg = cv2.bitwise_and(frame, frame, mask=mask_opening)

    # Process Contours
    for c in contours:
        # Filter Contour By Size
        if len(humanSizeSample) < 100:
            if cv2.contourArea(c) < minArea or cv2.contourArea(c) > maxArea:
                continue
            else:
                humanSizeSample.append(cv2.contourArea(c))
Example no. 36
def main(filename, show=True):

    if not show:
        cv2.imshow = lambda x, y: None

    video = cv2.VideoCapture(filename)

    fgbg = cv2.BackgroundSubtractorMOG2(100, 150, True)
    fgbg2 = cv2.BackgroundSubtractorMOG2(100, 150, False)

    # Use first frame for background removal
    playing, frame = video.read()
    fgbg.apply(frame)
    fgbg2.apply(frame)

    codecid = "avc1"
    codec = cv2.cv.FOURCC(*codecid)
    frame_size = frame.shape[1::-1]
    filename_out = '_out.'.join(filename.split('.', 1))
    video_out = cv2.VideoWriter(filename_out, codec, 10, frame_size)

    if not video_out.isOpened():
        raise RuntimeError('Output file not open')

    STATE = 0
    print states[STATE]

    count = 0
    while True:
        playing, frame = video.read()

        if not playing:
            break

        # blur image
        frame = cv2.GaussianBlur(frame, (3, 3), 0)

        fgmask = fgbg.apply(frame)
        fgmask2 = fgbg2.apply(frame)

        if STATE < 2:
            # Find shadow region

            cv2.imshow('frame', fgmask)

            opened = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            # cv2.imshow('opened frame', opened)
            # get just grey (127)

            shadow_mask, shadow_size = get_large_contour(
                opened, SHADOW_SIZE_THRESH, 126, 128)

            if shadow_mask is not None:
                # cv2.imshow('shadow mask', shadow_mask)
                # make shadows blue
                frame[shadow_mask == 255] = (255, 0, 0)

                if STATE == 0:
                    if shadow_size > 4000:
                        STATE = 1
                        print states[STATE]

                if STATE == 1:
                    if shadow_size < 2000:
                        STATE = 2
                        print states[STATE]

        if STATE == 2:
            # Isolate truck trap
            # detect large rocks

            # remove small noise (~3)
            opened = cv2.morphologyEx(fgmask2, cv2.MORPH_OPEN, smallkernel)

            # close to combine rubble in back of truck
            closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, bigkernel)
            # cv2.imshow('closed frame', closed)

            # get large background area
            large_mask, mask_size = get_large_contour(closed, 100, 254, 255)

            if large_mask is None:
                continue

            # Background green
            large_mask = cv2.morphologyEx(large_mask, cv2.MORPH_DILATE, kernel)
            frame[large_mask != 255] = (0, 255, 0)

            # ignore background
            fgmask2[large_mask != 255] = 0

            cv2.imshow('frame', fgmask2)
            opened = cv2.morphologyEx(fgmask2, cv2.MORPH_OPEN, smallkernel)
            # cv2.imshow('opened frame', opened)

            contours, hierarchy = cv2.findContours(opened, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            large_rocks_mask = np.zeros(opened.shape, np.uint8)

            # Find large contours
            large_rocks = [
                contour for contour in contours
                if cv2.contourArea(contour) > LARGE_ROCK_THRESH
            ]

            # find rectangular objects
            round_rocks = []
            for rock in large_rocks:
                x, y, w, h = cv2.boundingRect(rock)
                ratio = float(w) / h if w < h else float(h) / w  # float division; w and h are ints
                if ratio > ROCK_ROUNDNESS:
                    round_rocks.append(rock)
                #import ipdb; ipdb.set_trace()

            cv2.drawContours(large_rocks_mask, round_rocks, -1, 255, -1)
            #cv2.drawContours(large_rocks_mask, round_rocks, -1, 190, -1)
            cv2.imshow('large rocks', large_rocks_mask)
            large_rocks_mask = cv2.morphologyEx(large_rocks_mask,
                                                cv2.MORPH_CLOSE, kernel)
            # print len(large_rocks)
            frame[large_rocks_mask == 255] = (0, 0, 255)

        cv2.imshow('raw', frame)
        video_out.write(frame)

        if show:
            wait()

        count += 1

    video_out.release()
Example no. 37
        rate = rate + 0.11
    mask = color_detect(frame_list[i])
    mask_list.append(mask)
del mask
gc.collect()

# Slightly dilate the mask regions
print('Dilating mask...')
rate = 0.11
for i in range(fnum):
    if((i/fnum) >= rate):
        print(str(round(rate*100)) +'%')
        rate = rate + 0.11
    kernel = np.ones((3,3), np.uint8)
    mask_list[i] = cv2.dilate(mask_list[i], kernel, iterations=1)
    mask_list[i] = cv2.morphologyEx(mask_list[i], cv2.MORPH_OPEN, kernel)
del kernel
gc.collect()

# Remove the masked regions and fill them in from the surrounding pixels
print('Image Inpainting...')
rate = 0.11
for i in range(fnum):
    if((i/fnum) >= rate):
        print(str(round(rate*100)) +'%')
        rate = rate + 0.11
    frame_list[i] = cv2.inpaint(frame_list[i], mask_list[i], 3, cv2.INPAINT_TELEA)
del mask_list
gc.collect()

# Initialize TLD
Example no. 38
if len(sys.argv) < 2:
    print("Please call `./find_contours.py <image_file>`")
    sys.exit(-1)

image_path = sys.argv[1]
image = cv2.imread(image_path)
# create a kernel to be used by the gradient operation.
kernel = np.ones((5, 5), np.uint8)

# make the image gray for easier processing
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get the median blur of the image to enhance edge detection
median = cv2.medianBlur(gray, 3)
# apply gradient morphology to make the strong edges more complete
gradient = cv2.morphologyEx(median, cv2.MORPH_GRADIENT, kernel)
# adaptive threshold to make canny edge work better.
threshold = cv2.adaptiveThreshold(gradient, 255,
                                  cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 11, 2)
# get the canny edges of the image (computed here but not used below)
canny = cv2.Canny(gray, 150, 200, 3)

# find contours on the thresholded image, with a tree hierarchy and a simple approx of points
_, contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,
                                  cv2.CHAIN_APPROX_SIMPLE)


# a function to check the contour has 4 corners, and is a convex
def checkIsSquare(approx):
    return len(approx) == 4 and cv2.isContourConvex(approx)
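
A sketch of how checkIsSquare would typically be applied to the contours found above; the 0.02 epsilon factor is a common choice, not taken from this script:

squares = []
for contour in contours:
    peri = cv2.arcLength(contour, True)
    # simplify the contour before counting corners
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
    if checkIsSquare(approx):
        squares.append(approx)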
Example no. 39
 def __call__(self, img):
     opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, self.kernel)
     return opening
def morph(image):
    return cv.morphologyEx(image, cv.MORPH_TOPHAT, rect_kernel)
Example no. 41
plt.imshow(img_ori, cmap='gray')

# Convert to a grayscale image

# hsv = cv2.cvtColor(img_ori, cv2.COLOR_BGR2HSV)
# gray = hsv[:,:,2]
gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

plt.figure(figsize=(12, 10))
plt.imshow(gray, cmap='gray')


# Maximize contrast
structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement)
imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, structuringElement)

imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

plt.figure(figsize=(12, 10))
plt.imshow(gray, cmap='gray')

# Adaptive thresholding
img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)

img_thresh = cv2.adaptiveThreshold(
    img_blurred, 
    maxValue=255.0, 
    adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 
Example no. 42
    # hue=0, saturation=120, Brightness=50
    lower_red = np.array([0, 120, 50])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(
        hsv, lower_red, upper_red
    )  # mask1 detects red color as white and remaining BG as black

    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(
        hsv, lower_red,
        upper_red)  # mask2 detects the upper red hue range as white and the rest as black

    mask1 = mask1 + mask2

    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones(
        (3, 3), np.uint8))  # MORPH_DILATE expands the mask to cover fringe pixels

    mask2 = cv2.bitwise_not(mask1)

    res1 = cv2.bitwise_and(
        img, img,
        mask=mask2)  # keeps the background image visible, with the red-cloth region black
    res2 = cv2.bitwise_and(
        backgorund, backgorund, mask=mask1
    )  # the cloth region shows the static frame captured from the video, everything else black

    final = cv2.addWeighted(
        res1, 1, res2, 1, 0
    )  # we are giving same weights for both res1, res2 and 0 for gamma correction
Example no. 43
def processPngFile(outRoot, origFile, fileNum):

    baseName = os.path.basename(origFile)
    baseBase, _ = os.path.splitext(baseName)
    outDir = os.path.join(outRoot, "%s.%03d" % (baseBase, fileNum))
    inFile = os.path.join(outDir, baseName)

    outRoot2, outDir2 = os.path.split(outRoot)
    outFile2 = os.path.join(outRoot2, "%s.entropy" % outDir2, "%s.thresh.png" % baseBase)
    outFile2Gray = os.path.join(outRoot2, "%s.entropy" % outDir2, "%s.levels.png" % baseBase)
    print("outFile2=%s" % outFile2)

    imageColor = imread(origFile, as_gray=False)
    imageColor = img_as_ubyte(imageColor)

    image = imread(origFile, as_gray=True)
    image = img_as_ubyte(image)
    print("  image=%s" % desc(image))

    if False:
        denoised = cv2.fastNlMeansDenoising(image, None,
                                                templateWindowSize=templSize,
                                                searchWindowSize=searchSize)

        print("  denoised=%s" % desc(denoised))
        print("+" * 80)
        entImageGray = entropy(denoised, entropyKernel)
    else:
        entImageGray = entropy(image, entropyKernel)

    print("entImageGray=%s" % desc(entImageGray))

    # entImageClipped is for display only
    entImageClipped = 0.5 * entImageGray / entropyThreshold  # !@#$
    entImageClipped = np.clip(entImageClipped, 0.0, 1.0)

    # entImage is the thresholded image we use for detecting natural images
    entImage = normalize(entImageGray)
    print("entImage=%s" % desc(entImage))
    entImage = img_as_ubyte(entImage)
    print("entImage=%s" % desc(entImage))

    outDir2 = os.path.dirname(outFile2)
    os.makedirs(outDir2, exist_ok=True)
    # imsave(outFile2Gray, entImageClipped)
    imsave(outFile2, entImage)

    edged = cv2.Canny(entImage, 30, 200)
    # edgedD = cv2.dilate(edged, outlineKernel)
    edgedD = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, outlineKernel)

    edgeName = outFile2 + ".edges.png"
    dilatedName = outFile2 + ".dilated.png"
    imsave(edgeName, edged)
    imsave(dilatedName, edgedD)

    contours, _ = cv2.findContours(edgedD.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print("%d contours %s" % (len(contours), type(contours)))
    # print("%d contours %s:%s" % (len(contours), list(contours.shape), contours.dtype))
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    # contours = contours[:5]  # get largest five contour area
    rects = []
    cIm = None
    cImLevel = None
    cImE = None
    cImEFull = None
    for i, c in enumerate(contours):
        area = cv2.contourArea(c)
        if area < minArea:
            break
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, contourEpsilon * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        print("## %d: area=%g peri=%g p/a=%g %s %s" % (i, area, peri, peri*peri/area, [x, y], [w, h]))

        rect = {"X0": x, "Y0": y, "X1": x+w, "Y1": y+h}
        rects.append(rect)

        if cIm is None:
            cIm = imageColor.copy()
        cIm = cv2.rectangle(cIm, (x, y),  (x+w, y+h), color=(255, 0, 0), thickness=20)
        cIm = cv2.rectangle(cIm, (x, y),  (x+w, y+h), color=(0, 0, 255), thickness=10)

        if cImEFull is None:
            cImEFull = imageColor.copy()
        cImEFull = cv2.rectangle(cImEFull, (x, y), (x+w, y+h), color=(255, 0, 0), thickness=20)
        cImEFull = cv2.rectangle(cImEFull, (x, y), (x+w, y+h), color=(0, 0, 255), thickness=8)
        cImEFull = cv2.rectangle(cImEFull, (x, y), (x+w, y+h), color=(255, 255, 255), thickness=1)

        if cImE is None:
            cImE = edged.copy()
            cImE = cv2.cvtColor(cImE, cv2.COLOR_GRAY2RGB)
        cImE = cv2.rectangle(cImE, (x, y), (x+w, y+h), color=(255, 0, 0), thickness=10)

        if cImLevel is None:
            cImLevel = entImageClipped.copy()
            cImLevel = img_as_ubyte(cImLevel)
            cImLevel = cv2.cvtColor(cImLevel, cv2.COLOR_GRAY2RGB)
        cImLevel = cv2.rectangle(cImLevel, (x, y), (x+w, y+h), color=(255, 0, 0), thickness=10)

    if cIm is not None:
        cName = outFile2 + ".cnt.col.png"
        imsave(cName, cIm)
        print("~~~Saved %s" % cName)
    if cImLevel is not None:
        levelFile = outFile2 + ".level.png"
        imsave(levelFile, cImLevel)
        print("~#~Saved %s" % levelFile)
    if cImE is not None:
        cNameE = outFile2 + ".cnt.edge.png"
        imsave(cNameE, cImE)
        print("~#~Saved %s" % cNameE)
    if cImEFull is not None:
        cNameEFull = outFile2 + ".cnt.edge.full.png"
        imsave(cNameEFull, cImEFull)
        print("~$~Saved %s" % cNameEFull)
    # assert False
    return rects
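
Note that this example unpacks cv2.findContours into two values, which matches OpenCV 4; OpenCV 3 returns three. A small version-agnostic helper (a sketch, not part of the original code) smooths over the difference:

import cv2

def grab_contours(result):
    # OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x returns (contours, hierarchy)
    return result[1] if len(result) == 3 else result[0]

# usage:
# contours = grab_contours(cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))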
Esempio n. 44
0
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: xiao
# Created Time : 2019-05-22
# File Name: erode_dilate.py
# Description:
"""

import cv2
import numpy as np 



if __name__=='__main__':
    imgfn = 'test.jpg'
    img = cv2.imread(imgfn)
    for k in range(3,8):
        kernel = np.ones((k,k),np.uint8)  
        erosion = cv2.erode(img,kernel,iterations = 1)
        dilation = cv2.dilate(img,kernel,iterations = 1)
        closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        new_img = np.vstack([erosion, dilation, closing, opening])
        cv2.imshow('newimg_k_{}'.format(k), new_img)
        cv2.waitKey()
    cv2.destroyAllWindows()
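
np.ones gives a square structuring element; cv2.getStructuringElement can build other shapes, which often behave better on rounded objects. A quick sketch of the three built-in shapes:

import cv2

rect = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))      # same as np.ones((5, 5), np.uint8)
ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
print(rect, ellipse, cross, sep='\n\n')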
Esempio n. 45
0
handler = networking.create_gst_handler(pipeline, gs.SRC_NAME, 'valve',
                                        gs.UDP_NAME)

acceptThread = threading.Thread(target=networking.server.AcceptClients,
                                args=[sock, clis, handler])
acceptThread.daemon = True  # Makes the thread quit with the current thread
acceptThread.start()

frames = 0
start = time.time()

while True:
    status, img = vc.read()
    if status:
        mask = get_mask(img)
        closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, KERNEL, iterations=2)
        opening = cv2.morphologyEx(closing,
                                   cv2.MORPH_OPEN,
                                   KERNEL2,
                                   iterations=4)

        _, cnt, _ = cv2.findContours(opening, cv2.RETR_TREE,
                                     cv2.CHAIN_APPROX_SIMPLE)
        # cv2.drawContours(img, cnt, -1, (0, 0, 255), 2)
        valid = list(filter(valid_cnt, cnt))
        # cv2.drawContours(img, valid, -1, (0, 255, 255), 2)

        bestcnt = max(valid, key=weighted_score) if len(valid) > 0 else None
        if bestcnt is not None:
            # cv2.drawContours(img, [bestcnt], -1, (0, 255, 0), 2)
            bbx, bby, bbw, bbh = cv2.boundingRect(bestcnt)
Esempio n. 46
0
Lower_green = np.array([110,50,50])  # note: hue 110-130 is actually the blue band in OpenCV's HSV, despite the name

Upper_green = np.array([130,255,255])

while True:

	ret, img = cap.read()
	hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

	kernel=np.ones((5,5),np.uint8)

	mask=cv2.inRange(hsv,Lower_green,Upper_green)

	mask = cv2.erode(mask, kernel, iterations=2)

	mask=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernel)

	#mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)

	mask = cv2.dilate(mask, kernel, iterations=1)

	res=cv2.bitwise_and(img,img,mask=mask)

	cnts,heir=cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2:]

	center = None

 

	if len(cnts) > 0:
Esempio n. 47
0
def italic2(img):
    h, w = img.shape[:2]
    gray1 = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
    # cvtColor(src,type)
    # src: source image; type: conversion code
    # cv2.COLOR_BGR2BGRA adds an alpha channel to a BGR or RGB image
    avg = np.average(gray1)
    ret, binary = cv2.threshold(gray1, 200, 255, cv2.THRESH_BINARY)
    # threshold(src,thresh,maxval,type)
    # src: source image; thresh: threshold; maxval: value assigned above the threshold; type: thresholding mode
    # THRESH_BINARY: plain binary thresholding
    kernel = np.ones((60, 60), np.uint8)
    # set the size and type of the structuring element
    dst = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
    # cv2.morphologyEx(src, type, kernel)
    # src: source image; type: operation; kernel: structuring element
    # cv2.MORPH_OPEN performs an opening: erosion followed by dilation
    kernel = np.ones((100, 100), np.uint8)
    dst = cv2.morphologyEx(dst, cv2.MORPH_CLOSE, kernel)
    # cv2.MORPH_CLOSE performs a closing: dilation followed by erosion
    #cv2.imwrite("dst.jpg", dst)
    gray2 = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
    # BGR to grayscale conversion uses cv2.COLOR_BGR2GRAY
    contours, hierarchy = cv2.findContours(gray2, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # cv2.findContours(image, mode, method)
    # image: an 8-bit single-channel binary image; mode: retrieval mode (cv2.RETR_EXTERNAL detects only the outer contours); method: approximation (cv2.CHAIN_APPROX_SIMPLE compresses horizontal, vertical and diagonal runs, keeping only their endpoints)
    # contours is a list; each element is one contour stored as a numpy ndarray
    # hierarchy is an optional ndarray with one entry per contour; hierarchy[i][0]..hierarchy[i][3] are the indices of the next, previous, parent and first-child contours (negative when there is none)
    #cnt = contours[0]
    area = []
    for i in range(len(contours)):
        area.append(cv2.contourArea(contours[i]))
    max_idx = np.argmax(area)
    cnt = contours[max_idx]
    # pick the contour with the largest area
    leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
    rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
    topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
    bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])
    # extreme points of the contour: topmost, bottommost, leftmost, rightmost
    M = cv2.moments(cnt)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # centroid of the contour
    if cX < w / 2:
        y = bottommost[1] - topmost[1]
        x = topmost[0] - bottommost[0]
        # short diameter of the ellipse
        r = int(
            np.sqrt(
                np.square(topmost[0] - bottommost[0]) +
                np.square(topmost[1] - bottommost[1])))
        # long diameter of the ellipse
        jz = rightmost[0] - bottommost[0]
        jz1 = bottommost[1] - rightmost[1]
        cX1 = rightmost[0] + jz - 1.5 * x
        cY1 = bottommost[1] + jz1 + 1.5 * y
        cX2 = rightmost[0] + jz + 1.5 * x
        cY2 = bottommost[1] + jz1 - 1.5 * y
        cX3 = rightmost[0] + jz + 4 * y - 2 * x
        cY3 = bottommost[1] + jz1 + 4 * x + 2 * y
        cX4 = rightmost[0] + jz + 4 * y + 2 * x
        cY4 = bottommost[1] + jz1 + 4 * x - 2 * y
        pts1 = np.float32([[cX1, cY1], [cX2, cY2], [cX3, cY3], [cX4, cY4]])
        pts2 = np.float32([[0, 0], [4 * r, 0], [0, 4 * r], [4 * r, 4 * r]])
        MM = cv2.getPerspectiveTransform(pts1, pts2)
        # cv2.getPerspectiveTransform(src, dst) computes the transform matrix
        # src: source quadrilateral; dst: destination quadrilateral
        dst = cv2.warpPerspective(img, MM, (4 * r, 4 * r))
    elif cX > w / 2:
        y = bottommost[1] - topmost[1]
        x = bottommost[0] - topmost[0]
        # short diameter of the ellipse
        r = int(
            np.sqrt(
                np.square(topmost[0] - bottommost[0]) +
                np.square(topmost[1] - bottommost[1])))
        # long diameter of the ellipse
        jz = bottommost[0] - leftmost[0]
        jz1 = leftmost[1] - bottommost[1]
        cX1 = leftmost[0] - jz - 1.5 * x
        cY1 = bottommost[1] + jz1 - 1.5 * y
        cX2 = leftmost[0] - jz + 1.5 * x
        cY2 = bottommost[1] + jz1 + 1.5 * y
        cX3 = leftmost[0] - jz - 4 * y - 2 * x
        cY3 = bottommost[1] + jz1 + 4 * x - 2 * y
        cX4 = leftmost[0] - jz - 4 * y + 2 * x
        cY4 = bottommost[1] + jz1 + 4 * x + 2 * y
        pts1 = np.float32([[cX1, cY1], [cX2, cY2], [cX3, cY3], [cX4, cY4]])
        pts2 = np.float32([[0, 0], [4 * r, 0], [0, 4 * r], [4 * r, 4 * r]])
        MM = cv2.getPerspectiveTransform(pts1, pts2)
        # cv2.getPerspectiveTransform(src, dst) computes the transform matrix
        # src: source quadrilateral; dst: destination quadrilateral
        dst = cv2.warpPerspective(img, MM, (4 * r, 4 * r))
    #pts1 = np.float32([[cX-2*x,cY+x],[cX+2*x,cY+x],[cX-2*r,cY+x+4*r],[cX+2*r,cY+x+4*r]])
    # take the region of the source image to be perspective-transformed
    # define the layout of the output image after the transform
    #MM = cv2.getPerspectiveTransform(pts1,pts2)
    # cv2.getPerspectiveTransform(src, dst) computes the transform matrix
    # src: source quadrilateral; dst: destination quadrilateral
    #dst = cv2.warpPerspective(img,MM,(4*r,4*r))
    #print(r)
    #print(x)
    # warpPerspective applies the perspective transform
    return dst
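
The perspective warp at the heart of italic2 maps four source points onto a square. A minimal self-contained sketch of that mapping (the file name and corner coordinates are illustrative):

import cv2
import numpy as np

img = cv2.imread('page.jpg')  # placeholder input
src = np.float32([[50, 60], [400, 80], [70, 500], [420, 520]])  # four corners in the source image
dst = np.float32([[0, 0], [400, 0], [0, 400], [400, 400]])      # corners of a 400x400 output square
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, (400, 400))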
Esempio n. 48
0
    def main_detection(self, filename=None):
        # Display options for Spyder
        # get_ipython().run_line_magic('matplotlib', 'qt')    # to plot in a separate window in Spyder
        # get_ipython().run_line_magic('matplotlib', 'inline') #to plot in console inline

        filename = self.filename
        #get the data
        loaded = io.loadmat(filename)
        D_I = loaded['D_I']
        FG_ST = loaded['FG_ST']
        mat = D_I['m'][0][0]
        mat2 = FG_ST['m'][0][0]

        D_I_info = info(D_I['xmin'][0][0], D_I['xmax'][0][0],
                        D_I['ymin'][0][0], D_I['ymax'][0][0])
        FG_ST_info = info(FG_ST['xmin'][0][0], FG_ST['xmax'][0][0],
                          FG_ST['ymin'][0][0], FG_ST['ymax'][0][0])

        # Image sizes
        x_orig = mat.shape[1]
        y_orig = mat.shape[0]
        scale_percent = 220  # percent of original size
        width = 600  #int(img.shape[1] * scale_percent / 100)
        height = 500  #int(img.shape[0] * scale_percent / 100)

        shift_amt = 0
        # fig = plt.figure()
        best = 0
        best_l = []
        best_e = []
        a = HoughBundler()

        # Parameters
        hough_thresh = 60  #default 60. increase for less lines, decrease for more
        gray_thresh = 127  #default 127. decrease for fainter lines
        min_lines = 10  #default 10. Minimum number of lines you expect for a diagram

        # plt.imshow((mat), interpolation='nearest', aspect='auto', origin='lower')
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        # %% Part I - Image Processing / Line Detection
        while True:
            mat3 = shiftMatrix(mat, shift_amt)
            # plt.imshow(mat, interpolation='nearest', aspect='auto')
            # plt.imshow(mat, interpolation='nearest', aspect='auto')

            mimg.imsave(tmp_dir + "/test.jpg", mat3, origin='lower')
            # mimg.imsave(tmp_dir +"/test2.jpg", mat, origin='lower')

            # Get image, rescale, convert to gray and blur
            img = cv2.imread(tmp_dir + "/test.jpg")  # match the path used by imsave above
            dim = (width, height)
            img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
            cv2.imwrite("test_resized.jpg", img)

            # Convert to grayscale,threshold and create edges
            kernel = np.ones((5, 5), np.uint8)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, thresh1 = cv2.threshold(gray, gray_thresh, 255,
                                         cv2.THRESH_BINARY)
            edges = cv2.Canny(thresh1, 50, 150, apertureSize=3)
            fill = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)

            img_concate_Hori = np.concatenate((gray, thresh1), axis=1)
            img_concate_Vert = np.concatenate((edges, fill), axis=1)
            img_concate = np.concatenate((img_concate_Hori, img_concate_Vert),
                                         axis=0)
            # cv2.imshow('concatenated_H',img_concate)

            #Hough Transform - Actually generates the lines
            lines = cv2.HoughLinesP(edges,
                                    rho=1,
                                    theta=1 * np.pi / 180,
                                    threshold=58,
                                    minLineLength=50,
                                    maxLineGap=100)

            # Save best line
            if lines is not None:
                if (len(lines) > len(best_l)):
                    best = shift_amt
                    best_l = lines
                    best_e = edges
                line_rem = a.process_lines(lines, edges)
                # if len(line_rem) > min_lines:
                # break

            shift_amt += 1
            print("trying", shift_amt, "element shift")
            if shift_amt > 10:
                shift_amt = -10
            elif shift_amt == -1:
                print("returning: ", best)
                line_rem = a.process_lines(best_l, best_e)
                break

        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # Save image w/o lines
        mimg.imsave(tmp_dir + "/new_resized.jpg", img, origin='lower')

        # Create line joining two points using bresenham algorithm, also plot
        img = refresh(mat, best, width, height)
        new_line_pts = []  # per-line point lists, rescaled to the original resolution
        new_lines = []  # (x1, y1), (x2, y2) endpoints of each line, rescaled to the original resolution
        y_scale = y_orig / height
        x_scale = x_orig / width
        S = [x_scale, y_scale]
        for idx, line in enumerate(line_rem):
            # print ("{} - idk {} - {}".format(idx, line[0], line[1]))
            cv2.line(img, tuple(line[0]), tuple(line[1]), (0, 255, 255), 3)
            a = [int(round(x)) for x in line[1] * S]
            b = [int(round(x)) for x in line[0] * S]
            new_line_pts.append([a, b])
            new_lines.append(
                list(bresenham(*new_line_pts[-1][0], *new_line_pts[-1][1])))

        # cv2.imshow("edges", img)
        # plt.imshow(img)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        mimg.imsave(tmp_dir + "/lines.jpg", img, origin='lower')

        #Remove duplicates so there is only one point in each row
        for idx, line in enumerate(new_lines):
            i = line[1][1]
            to_Remove = []
            j = 0
            for pt in line:
                if pt[1] == i and j > 0:
                    to_Remove.append(pt)
                elif pt[1] != i:
                    i = pt[1]
                    j = 1
                else:
                    j += 1
            # line.remove(to_Remove)
            new_lines[idx] = [i for i in line if i not in to_Remove]
        #%% Part II - Maths and my Algos - Generate the offsets between lines

        # Extract padded blocks - a single "block" is a row of mat2 given the location of the line generated from mat 1
        # In other words, this function extracts the elements surrounding a line in matrix2
        blocks = []
        block_Hsize = 40  # block half size
        for idx, line in enumerate(new_lines):
            one_block = []
            for y in line:
                x = np.arange(max(y[0] - block_Hsize, 0),
                              min(y[0] + block_Hsize, x_orig))
                one_block.append(mat2[y[1] - 1, x])
            blocks.append(one_block)

        derivative = np.diff(blocks[6])
        mode_m = (mode(mode(derivative, axis=1)[0])[0])
        print(mode_m)
        slope_locations = []  #x-values where the gradient matches the mode
        offset = []  # vertical offsets before and after the "jumps" for each line
        sections = []  #array containing the x1y1, x2y2 points

        for block in blocks:
            slope_locations_tmp = []
            offset_tmp = []
            sections_tmp = []
            for row in block:
                derivative = np.diff(row)
                x = find_longest(derivative, mode_m)
                slope_locations_tmp.append(x)
                # Do i have to rescale the X??? not true current
                section1 = (x[0][0], row[x[0][0]], x[0][1], row[x[0][1]])
                section2 = (x[1][0], row[x[1][0]], x[1][1], row[x[1][1]])
                sections_tmp.append((section1, section2))
                offset_tmp.append(abs(getYInt(*section1) - getYInt(*section2)))
            slope_locations.append(slope_locations_tmp)
            offset.append(offset_tmp)
            sections.append(sections_tmp)

        tmp_offset = []
        for row in offset:
            tmp_offset.append(np.mean(row))

        mean_offset = (np.mean(tmp_offset))
        # derivative = np.diff(blocks[0][0])
        # x = find_longest(derivative, mode_m)

        # print(slope_locations)
        # plt.plot(blocks[0][0])
        # plt.plot(offset)
        #%% Reshape and remap points from image resolution (0-x_orig)x(0-y_orig) in pixels to measurement resolution (xmin-xmax)x(ymin-ymax) in amps
        orig_yrange = (D_I_info.ymax - D_I_info.ymin)
        orig_xrange = (D_I_info.xmax - D_I_info.xmin)
        orig_gradient = orig_yrange / orig_xrange
        new_gradient = []
        for i, row in enumerate(new_line_pts):
            x1 = D_I_info.xmin + row[1][0] * orig_xrange / x_orig
            x2 = D_I_info.xmin + row[0][0] * orig_xrange / x_orig
            y1 = D_I_info.ymax - row[1][1] * orig_yrange / y_orig
            y2 = D_I_info.ymax - row[0][1] * orig_yrange / y_orig
            dx = x1 - x2
            dy = y1 - y2
            new_gradient.append(dy / dx)
            new_line_pts[i] = [[x1, y1], [x2, y2]]  # write the remapped endpoints back

        # print(new_gradient)

        #%% Part III - Final Calculations
        x_intercepts = []
        y_intercepts = []
        for line in new_line_pts:
            x_intercepts.append(getXInt(*line[0], *line[1]))
            y_intercepts.append(getYInt(*line[0], *line[1]))

        #Delta Q = 1 electron charge
        DeltaQ = 1.60217662e-19

        # charging energy of dot: assume 5 meV
        Edot = 5e-3
        Cdot = DeltaQ / Edot
        # e/eV

        # charging energy of SET: assume 20 meV
        Eset = 20e-3
        Cset = DeltaQ / Eset

        #Capacitance between TG and SET
        Lset = 20  # SET lever arm (%)
        Ldot = 20  # dot lever arm (%) (NOT USED)
        Ctgs = Lset * 1e-2 * Cset

        #capacitance between dot and SET
        DeltaV = mean_offset * 0.02  # the average of all the offsets
        print("ΔVtgs = ", DeltaV, "V")
        Cds = DeltaV / Edot * Ctgs

        #capacitance g1 and d
        DeltaV1d = np.mean(np.gradient(np.squeeze(y_intercepts)))
        Cg1d = DeltaQ / DeltaV1d * 5
        print("ΔVg1 = ", DeltaV1d, "* 0.2 V = ", DeltaV1d / 5, "V")

        #capacitance g2 and dot
        DeltaV2d = np.mean(np.gradient(np.squeeze(x_intercepts)))
        Cg2d = DeltaQ / DeltaV2d * 5
        print("ΔVg2 = ", DeltaV2d, "* 0.2 V = ", DeltaV2d / 5, "V")

        #Capactitance g2 and set
        Cg2s = 1e-19

        #capacitance g1 and set
        m = 0.0010  # gradient of the TG map
        Cg1s = m * Ctgs

        #capacitance to infinity - Dont need!
        #                 Cd = Cdot - Cds - Cg1d - Cg1s;
        #                 Cs = Cset - Cds - Cg1s - Cg2s - Ctgs;

        #Table Column data
        scale = 1e+18
        DOT = np.round(scale * np.asarray([Cdot, -Cds, -Cg1d, -Cg2d, 0]), 4)
        SET = np.round(scale * np.asarray([-Cds, Cset, -Cg1s, -Cg2s, -Ctgs]),
                       4)
        G1 = np.round(scale * np.asarray([-Cg1d, -Cg1s, 0, 0, 0]), 4)
        G2 = np.round(scale * np.asarray([-Cg2d, -Cg2s, 0, 0, 0]), 4)
        TG = np.round(scale * np.asarray([0, -Ctgs, 0, 0, 0]), 4)

        t = PrettyTable([' ', 'Dot', 'SET', 'G1', 'G2', 'TopGate'])
        t.float_format = 4.4
        t.add_row(['Dot', *DOT])
        t.add_row(['SET', *SET])
        t.add_row(['G1', *G1])
        t.add_row(['G2', *G2])
        t.add_row(['TopGate', *TG])

        print("Capacitance Matrix (aF):")
        print(t)

        # Store some class properties
        results = out(DeltaV1d, DeltaV2d, DeltaV,
                      (Edot * 1e3, Eset * 1e3, Ldot, Lset), DOT, SET, G1, G2,
                      TG)

        self.mat0 = mat
        self.mat1 = mat
        self.meas_info = D_I_info
        self.lines = new_line_pts
        self.results = results
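
getXInt and getYInt are defined elsewhere in this project; from how they are called (two endpoints in, one scalar out), they presumably return the axis intercepts of the line through those points. A hedged sketch of what they might look like:

def getYInt(x1, y1, x2, y2):
    # y-intercept of the line through (x1, y1) and (x2, y2); assumes x1 != x2
    slope = (y2 - y1) / (x2 - x1)
    return y1 - slope * x1

def getXInt(x1, y1, x2, y2):
    # x-intercept of the same line; assumes it is not horizontal
    slope = (y2 - y1) / (x2 - x1)
    return x1 - y1 / slope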
Esempio n. 49
0
def find_banana(image, ruta='resultados'):
    # The RGB scheme is swapped to BGR, since the functions are more compatible
    # with blue carrying the most weight
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # cv2.imwrite('resultados/bgr.jpg', image)

    # Fix a target size
    # based on the largest dimension
    max_dimension = max(image.shape)
    # The output image scale will be no larger than 700 px
    scale = 700 / max_dimension
    # Resize the image uniformly by that scale factor.
    image = cv2.resize(image, None, fx=scale, fy=scale)

    # Reduce the image noise using a Gaussian filter, with the
    # maximum square scale.
    # image_blur = cv2.bilateralFilter(image,9,75,75)
    image_blur = cv2.GaussianBlur(image, (7, 7), 0)
    cv2.imwrite(ruta + '/blur.jpg', image_blur)

    # We want to focus on color, so we switch to the HSV scheme,
    # which emphasizes hue and handles only saturation and
    # value alongside it
    image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_RGB2HSV)
    cv2.imwrite(ruta + '/hsv.jpg', image_blur_hsv)

    #kernel = np.ones((5,5),np.uint8)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    erosion = cv2.erode(image_blur_hsv, kernel, iterations=1)
    cv2.imwrite(ruta + '/erosionado.jpg', erosion)
    dilation = cv2.dilate(image_blur_hsv, kernel, iterations=1)
    cv2.imwrite(ruta + '/dilatado.jpg', dilation)
    dilation_blur = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel)
    cv2.imwrite(ruta + '/dilatado_blur.jpg', dilation_blur)

    # Filter by color
    # 20-30 hue
    """There is a problem here: we chose a range of yellows, but it is not
    recognized in the image, and this range is hard to tune since it
    sometimes picks up colors it should not. By hue, yellows run roughly
    from 50 to 70."""
    #hsv(15, 80, 50)
    #hsv(105, 120, 255)
    min_yellow = np.array([15, 100, 80])
    max_yellow = np.array([105, 255, 255])
    # min_yellow = np.array([20, 100, 80])
    # max_yellow = np.array([30, 255, 255])
    #layer
    mask1 = cv2.inRange(dilation_blur, min_yellow, max_yellow)

    #hsv(230, 0, 0)
    #hsv(270, 255, 255)
    black_min = np.array([130, 0, 0])
    black_max = np.array([170, 255, 255])
    black_mask = cv2.inRange(dilation_blur, black_min, black_max)
    cv2.imwrite(ruta + '/mascara_negro.jpg', black_mask)

    # Filter by brightness
    # 170-180 hue
    # We try to emphasize brightness to get better color
    # recognition.
    #hsv(170,100,80)
    #hsv(180,255,255)
    min_yellow2 = np.array([170, 100, 80])
    max_yellow2 = np.array([180, 255, 255])
    mask2 = cv2.inRange(dilation_blur, min_yellow2, max_yellow2)
    cv2.imwrite(ruta + '/mascara1.jpg', mask1)
    cv2.imwrite(ruta + '/mascara2.jpg', mask2)

    # Combine the color masks.
    mask = mask1 + mask2 + black_mask
    cv2.imwrite(ruta + '/mask.jpg', mask)
    # opening = cv2.morphologyEx(dilation, cv2.MORPH_OPEN, kernel)
    # cv2.imwrite('resultados/opening.jpg', opening)

    # Clean up the image and create the ellipse.

    # Closing (dilate, then erode) fills uncolored gaps inside
    # the region we want to enclose.
    mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask_closed = cv2.dilate(mask_closed, kernel, iterations=3)
    # mask_closed = cv2.dilate(mask_closed, kernel, iterations = 1)
    # mask_closed = cv2.morphologyEx(mask_closed, cv2.MORPH_CLOSE, kernel)
    cv2.imwrite(ruta + '/closed.jpg', mask_closed)
    # Opening (erode, then dilate) removes noise outside the detected region.
    mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)
    cv2.imwrite(ruta + '/open.jpg', mask_clean)

    # Find the best match and receive its contour
    big_banana_contour, mask_bananas = find_biggest_contour(mask_clean)

    # Overlay the cleaned mask and brighten it on the image.
    overlay = overlay_mask(mask_clean, image)
    cv2.imwrite(ruta + '/overlay.jpg', overlay)

    # Circle the best-matching region.
    circled, cropped = circle_contour(image, big_banana_contour, ruta)

    # And convert back to the original color scheme.
    cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)

    return circled, cropped
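
find_biggest_contour, overlay_mask and circle_contour are helpers defined elsewhere in this project; as an illustration only, a plausible find_biggest_contour could look like this (a sketch under that assumption, not the original implementation):

import cv2
import numpy as np

def find_biggest_contour(mask):
    # return the largest contour in the mask and a mask containing only that contour
    contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None, np.zeros_like(mask)
    biggest = max(contours, key=cv2.contourArea)
    out = np.zeros_like(mask)
    cv2.drawContours(out, [biggest], -1, 255, -1)
    return biggest, out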
Esempio n. 50
0
def above(img):
    h, w = img.shape[:2]
    gray1 = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
    #cvtColor(src,type)
    # src: source image; type: conversion code
    # cv2.COLOR_BGR2BGRA adds an alpha channel to a BGR or RGB image
    ret, binary = cv2.threshold(gray1, 100, 255, cv2.THRESH_BINARY)
    #threshold(src,thresh,maxval,type)
    # src: source image; thresh: threshold; maxval: value assigned above the threshold; type: thresholding mode
    # THRESH_BINARY: plain binary thresholding
    kernel = np.ones((10, 10), np.uint8)
    # set the size and type of the structuring element
    dst = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
    #cv2.morphologyEx(src, type, kernel)
    # src: source image; type: operation; kernel: structuring element
    # cv2.MORPH_OPEN performs an opening: erosion followed by dilation
    kernel = np.ones((100, 100), np.uint8)
    dst = cv2.morphologyEx(dst, cv2.MORPH_CLOSE, kernel)
    # cv2.MORPH_CLOSE performs a closing: dilation followed by erosion
    #cv2.imwrite("dst.jpg",dst)
    gray2 = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
    # BGR to grayscale conversion uses cv2.COLOR_BGR2GRAY
    contours, hierarchy = cv2.findContours(gray2, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    #cv2.findContours(image, mode, method)
    # image: an 8-bit single-channel binary image; mode: retrieval mode (cv2.RETR_EXTERNAL detects only the outer contours); method: approximation (cv2.CHAIN_APPROX_SIMPLE compresses horizontal, vertical and diagonal runs, keeping only their endpoints)
    # contours is a list; each element is one contour stored as a numpy ndarray
    # hierarchy is an optional ndarray with one entry per contour; hierarchy[i][0]..hierarchy[i][3] are the indices of the next, previous, parent and first-child contours (negative when there is none)
    area = []
    for i in range(len(contours)):
        area.append(cv2.contourArea(contours[i]))
    max_idx = np.argmax(area)
    cnt = contours[max_idx]
    # pick the contour with the largest area
    perimeter = round(cv2.arcLength(cnt, True))
    # arcLength computes the contour perimeter; True means the contour is closed
    M = cv2.moments(cnt)
    # moments returns contour features as a dict; M["m00"] is the contour area
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # centroid of the contour
    radio = perimeter / 6.28
    # estimate the contour radius from the perimeter (perimeter ≈ 2πr)
    if cX < w / 2 and cY < h / 2:
        a = int(cX + 10 * radio)
        b = int(cX + radio)
        c = int(cY + 10 * radio)
        d = int(cY + radio)
        crop = img[d:c, b:a]
        # crop this region of the image
    elif cX < w / 2 and cY > h / 2:
        a = int(cX + 10 * radio)
        b = int(cX + radio)
        c = int(cY - 10 * radio)
        d = int(cY - radio)
        crop = img[c:d, b:a]
        # crop this region of the image
    elif cX > w / 2 and cY < h / 2:
        a = int(cX - 10 * radio)
        b = int(cX - radio)
        c = int(cY + 10 * radio)
        d = int(cY + radio)
        crop = img[d:c, a:b]
        # crop this region of the image
    else:
        a = int(cX - 10 * radio)
        b = int(cX - radio)
        c = int(cY - 10 * radio)
        d = int(cY - radio)
        crop = img[c:d, a:b]
        # crop this region of the image
    return crop
Esempio n. 51
0
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # define range of blue color in HSV
    lower_blue = np.array([hmin, smin, vmin])
    upper_blue = np.array([hmax, smax, vmax])

    # Threshold the HSV image to get only blue colors
    mask = cv2.inRange(hsv, lower_blue, upper_blue)

    # morph
    if s == 1:
        kernel = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(mask, kernel, iterations=2)
        kernel = np.ones((15, 15), np.uint8)
        opening = cv2.morphologyEx(dilation, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    cv2.imshow('image', frame)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
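
hmin, smin, vmin, hmax, smax and vmax are not defined in this fragment; in interactive HSV tuners like this one they usually come from trackbars. A sketch of that setup (the window and trackbar names are assumptions):

import cv2

def nothing(_):
    pass  # callback is unused; the values are polled each frame instead

cv2.namedWindow('image')
for name, maxval in [('hmin', 179), ('smin', 255), ('vmin', 255),
                     ('hmax', 179), ('smax', 255), ('vmax', 255)]:
    cv2.createTrackbar(name, 'image', 0, maxval, nothing)

# inside the loop:
# hmin = cv2.getTrackbarPos('hmin', 'image')  # and likewise for the other five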
Esempio n. 52
0
def backgroundSubtraction(inputVideoStream, dog_size):  # minimum dog sizes
    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)

    Logger.debug("Dog Size is: %s" % dog_size)

    try:
        cap = cv2.VideoCapture(inputVideoStream)
    except Exception as exp:
        exit(1)
    width = cap.get(3)
    height = cap.get(4)
    resize = False
    if (int(width) != 640 and int(height) != 480):
        width = width / 2
        height = height / 2
        resize = True
    sizeOfFrame = (int(width), int(height))
    Logger.debug("Video Stream Acquired: " + str(inputVideoStream) +
                 " with dimensions " + str(sizeOfFrame))
    heightWidthRect = 750  # detectionArea if detectionArea > 0 else (height * width * 0.002)
    Logger.debug("Minimum Contour Area: " + str(heightWidthRect))
    allTracks = []
    frameNo = 0
    kernel = numpy.ones((5, 5), numpy.uint8)
    cntx = -1
    cnty = -1
    xcoord = np.array([[459, 450]
                       ]).T  # storing x and y coordinates as column vectors
    ycoord = np.array([[467, 466]]).T
    coord_list = []
    cameraMatrix = np.array([[532.80990646, 0.0, 342.49522219],
                             [0.0, 532.93344713, 233.88792491],
                             [0.0, 0.0, 1.0]])
    distCoeffs = np.array([
        -2.81325798e-01, 2.91150014e-02, 1.21234399e-03, -1.40823665e-04,
        1.54861424e-01
    ])
    axisLen = 0.01
    slbg = cv2.createBackgroundSubtractorMOG2(varThreshold=16,
                                              detectShadows=False)
    axispoints = np.float32([[0, 0, 0], [axisLen, 0, 0], [0, axisLen, 0],
                             [0, 0, axisLen]]).reshape(-1, 3)

    if dog_size == 0:
        heightWidthRect = 500
        Logger.debug("Small rectangle size is: " + str(heightWidthRect))

    elif dog_size == 1:
        heightWidthRect = 750
        Logger.debug("Medium rectangle size is: " + str(heightWidthRect))

    elif dog_size == 2:
        heightWidthRect = 1000
        Logger.debug("Large rectangle size is: " + str(heightWidthRect))

    try:
        while Config.RUN_FLAG:
            # capture frame-by-frame
            (ret, frame) = cap.read()
            if resize:
                frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
            if not ret:
                break

            fgmask = slbg.apply(frame)
            fgmask = cv2.blur(fgmask, (15, 15), (-1, -1))
            fgmask = cv2.threshold(fgmask, 235, 255, cv2.THRESH_BINARY)[1]
            fgmask = cv2.dilate(fgmask, None, iterations=2)
            fgmask = cv2.erode(fgmask, kernel, iterations=2)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

            (_, contours, _) = cv2.findContours(fgmask.copy(),
                                                cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)

            # contours is a double-array, outer array is a list of contours, inner array is points of a single contour
            # hierarchy is parent-child relationship of the contours
            # foreground = cv2.cvtColor(foreground, cv2.COLOR_BGR2GRAY)
            foreground = slbg.getBackgroundImage()
            res = cv2.aruco.detectMarkers(frame, dictionary)

            if len(res[0]) > 0:
                frame = cv2.aruco.drawDetectedMarkers(frame, res[0], res[1])
                rvecs, tvecs = cv2.aruco.estimatePoseSingleMarkers(
                    res[0], 0.05, cameraMatrix, distCoeffs)
                print(cv2.aruco.estimatePoseSingleMarkers(
                    res[0], 0.05, cameraMatrix, distCoeffs))

                for i in range(0, len(res[1])):
                    #                cv2.circle(frame, Point(res[0][0][0][0][0],res[0][0][0][0][1]), 3, color=(255, 255, 255))
                    #                cv2.circle(frame, Point(res[0][0][0][1][0],res[0][0][0][1][1]), 3, color=(255, 255, 255))
                    #                cv2.circle(frame, Point(res[0][0][0][2][0],res[0][0][0][2][1]), 3, color=(255, 255, 255))
                    #                cv2.circle(frame, Point(res[0][0][0][3][0],res[0][0][0][3][1]), 3, color=(255, 255, 255))
                    #                frame = cv2.aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.01);
                    try:
                        imgpts, _ = cv2.projectPoints(
                            axispoints,
                            rvec=rvecs,
                            tvec=tvecs,
                            cameraMatrix=cameraMatrix,
                            distCoeffs=distCoeffs)

                        apt0 = Point(imgpts[0][0][0], imgpts[0][0][1])
                        apt1 = Point(imgpts[1][0][0], imgpts[1][0][1])
                        apt2 = Point(imgpts[2][0][0], imgpts[2][0][1])
                        apt3 = Point(imgpts[3][0][0], imgpts[3][0][1])
                        cv2.line(frame, apt0, apt1, (0, 0, 255), 3)
                        cv2.line(frame, apt0, apt2, (0, 255, 0), 3)
                        # cv2.line(frame, pt0, pt3, (255, 0, 0), 3)
                    except Exception as exp:
                        Logger.error(exp.message)

            listOfRectangles = []

            for c in contours:
                if cv2.contourArea(c) < heightWidthRect:
                    continue
                listOfRectangles.append(cv2.boundingRect(c))
                rect = cv2.minAreaRect(c)
                points = cv2.boxPoints(rect)
                points = np.int0(points)  # int8 would overflow for coordinates above 127

                # check to see if any rectangle points are in list
                if np.any(
                        np.logical_and(xcoord == points[:, 0],
                                       ycoord == points[:, 1])
                ):  # checks to see if any instance occurs where coordinates are in bounding box
                    continue
                coord_list.append(points)

            if len(listOfRectangles) > 0:
                cv2.groupRectangles(listOfRectangles, 1, 0.05)
                if len(res[0]) > 0:
                    for arr in res[0]:
                        for ar in arr:
                            cntx = ar[0][0] + ar[1][0] + ar[2][0] + ar[3][0]
                            cnty = ar[0][1] + ar[1][1] + ar[2][1] + ar[3][1]
                            cntx = int(cntx / 4)
                            cnty = int(cnty / 4)
                    if len(res[0]) == 0:
                        cntx = -1
                        cnty = -1

                for rectangle in listOfRectangles:
                    (x, y, w, h) = rectangle
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)
                    cv2.putText(frame, "Movement No. " + str(len(allTracks)),
                                (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                (200, 0, 155), 1)
                    centerPt = Point((x + x + w) / 2, (y + y + h) / 2)
                    pathTrack(allTracks, centerPt, frame, frameNo)

                    pt1 = numpy.array([cntx, cnty])
                    pt2 = numpy.array(centerPt)
                    if cntx != -1 and numpy.linalg.norm(
                            pt1 - pt2
                    ) < 200 and apt1[1] < pt2[1] and apt2[1] < pt2[1]:
                        Logger.debug("in proximity")

            cv2.imshow("Security Feed", frame)
            cv2.imshow("movements", fgmask)
            cv2.imshow("foreground", foreground)

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
            k = cv2.waitKey(2) & 0xff
            if k == 27:
                break
            frameNo = frameNo + 1
    except Exception as exp:
        Logger.error(exp)
    finally:
        cap.release()
        readFinished = True
        cv2.destroyAllWindows()
        Logger.debug("Total Tracks Detected:" + str(len(allTracks)))
        pathModel = PathModel(height=height, width=width, tracks=allTracks)
Esempio n. 53
0
file_prueba='/hough_prueba.png'

#img1 = cv2.imread(path+file2,cv2.IMREAD_GRAYSCALE)

img=cv2.imread(path+file_prueba, cv2.IMREAD_GRAYSCALE)

plt.figure()
plt.imshow(img)


ret, thresh = cv2.threshold(img,90,255,0)
#thresh= np.uint8(255*(thresh-thresh.min()) / (thresh.max()-thresh.min()))


kernel = np.ones((5, 5), np.uint8)
thresh=cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel)
#thresh=cv2.GaussianBlur(thresh,(5,5),cv2.BORDER_DEFAULT)

#thresh=cv2.
plt.figure('Campo de cultivo')
plt.imshow(thresh,'gray')

img=thresh
#img = abs(thresh-255)
# %%
# For a bad simulation, comment out the if and run cv2.HL with 300

img1 = cv2.imread(path+file_prueba)


#gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
Esempio n. 54
0
detect = []
offset = 6  # allowable error between pixels
counter = 0

while True:
    ret, frame1 = cap.read()
    grey = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(grey, (3, 3), 5)

    # applying on each frame
    img_sub = algorithm.apply(blur)
    dilat = cv2.dilate(img_sub, np.ones((5, 5)))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel)  # closing applied three times to fill gaps aggressively
    dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel)
    dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel)
    counterShape, h = cv2.findContours(dilatada, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

    cv2.line(frame1, (25, count_line_position), (1200, count_line_position),
             (255, 127, 0), 3)

    for (i, c) in enumerate(counterShape):
        (x, y, w, h) = cv2.boundingRect(c)
        validate_counter = (w >= min_width_rect) and (h >= min_height_rect)

        if not validate_counter:
            continue
Esempio n. 55
0
    def __preTreatment(self, car_pic):
        if type(car_pic) == type(""):
            img = self.__imreadex(car_pic)
        else:
            img = car_pic
        pic_hight, pic_width = img.shape[:2]

        if pic_width > self.MAX_WIDTH:
            resize_rate = self.MAX_WIDTH / pic_width
            img = cv2.resize(img, (self.MAX_WIDTH, int(pic_hight * resize_rate)),
                             interpolation=cv2.INTER_AREA)  # downscale to the resolution limit
        # cv2.imshow('Image', img)

        blur = self.cfg["blur"]
        # Gaussian denoising
        if blur > 0:
            img = cv2.GaussianBlur(img, (blur, blur), 0)
        oldimg = img
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # cv2.imshow('GaussianBlur', img)

        kernel = np.ones((20, 20), np.uint8)
        img_opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)  # opening
        img_opening = cv2.addWeighted(img, 1, img_opening, -1, 0)  # subtract the opening from the original (a top-hat) to keep fine detail
        # cv2.imshow('img_opening', img_opening)

        # find the image edges
        ret, img_thresh = cv2.threshold(img_opening, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # Otsu binarization
        img_edge = cv2.Canny(img_thresh, 100, 200)
        # cv2.imshow('img_edge', img_edge)

        # use closing and opening to merge the edges into a connected whole
        kernel = np.ones((self.cfg["morphologyr"], self.cfg["morphologyc"]), np.uint8)
        img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)  # closing
        img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)  # opening
        # cv2.imshow('img_edge2', img_edge2)

        # find the rectangular regions formed by the merged edges; there may be many, and the plate sits in one of them
        contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = [cnt for cnt in contours if cv2.contourArea(cnt) > self.Min_Area]
        # print(contours[0])

        # rule out, one by one, the rectangles that are not a plate
        car_contours = []
        for cnt in contours:
            # minAreaRect returns the minimum bounding rectangle: (center (x, y), (width, height), rotation angle)
            rect = cv2.minAreaRect(cnt)
            # print('width/height:', rect[1])
            area_width, area_height = rect[1]
            # make sure width is the larger dimension
            if area_width < area_height:
                area_width, area_height = area_height, area_width
            wh_ratio = area_width / area_height
            # print('aspect ratio:', wh_ratio)
            # require an aspect ratio between 2 and 5.5, the typical range for license plates; discard the other rectangles
            if wh_ratio > 2 and wh_ratio < 5.5:
                car_contours.append(rect)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
            # draw all candidate rectangles
            # oldimg = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
            # cv2.imshow("Test",oldimg )
            # print(car_contours)

        # the rectangles may be tilted and need deskewing so color-based localization can be used
        card_imgs = []
        for rect in car_contours:
            if rect[2] > -1 and rect[2] < 1:  # force a small angle so left/top/right/bottom get the correct values
                angle = 1
            else:
                angle = rect[2]
            rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle)  # enlarge the rect so the plate edges are not clipped
            box = cv2.boxPoints(rect)
            heigth_point = right_point = [0, 0]
            left_point = low_point = [pic_width, pic_hight]
            for point in box:
                if left_point[0] > point[0]:
                    left_point = point
                if low_point[1] > point[1]:
                    low_point = point
                if heigth_point[1] < point[1]:
                    heigth_point = point
                if right_point[0] < point[0]:
                    right_point = point

            if left_point[1] <= right_point[1]:  # positive angle
                new_right_point = [right_point[0], heigth_point[1]]
                pts2 = np.float32([left_point, heigth_point, new_right_point])  # only the character height needs to change
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                self.__point_limit(new_right_point)
                self.__point_limit(heigth_point)
                self.__point_limit(left_point)
                card_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]
                card_imgs.append(card_img)

            elif left_point[1] > right_point[1]:  # negative angle

                new_left_point = [left_point[0], heigth_point[1]]
                pts2 = np.float32([new_left_point, heigth_point, right_point])  # only the character height needs to change
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                self.__point_limit(right_point)
                self.__point_limit(heigth_point)
                self.__point_limit(new_left_point)
                card_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]
                card_imgs.append(card_img)
        # cv2.imshow("card", card_imgs[0])

        # #____ use color localization to rule out non-plate rectangles; only blue, green and yellow plates are recognized for now
        colors = []
        for card_index, card_img in enumerate(card_imgs):
            green = yellow = blue = black = white = 0
            try:
                # the conversion can fail when the rectangle deskewing above went wrong
                card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
            except:
                print('BGR to HSV conversion failed')
                card_imgs = colors = None
                return card_imgs, colors

            if card_img_hsv is None:
                continue
            row_num, col_num = card_img_hsv.shape[:2]
            card_img_count = row_num * col_num

            # determine the plate color
            for i in range(row_num):
                for j in range(col_num):
                    H = card_img_hsv.item(i, j, 0)
                    S = card_img_hsv.item(i, j, 1)
                    V = card_img_hsv.item(i, j, 2)
                    if 11 < H <= 34 and S > 34:
                        yellow += 1
                    elif 35 < H <= 99 and S > 34:
                        green += 1
                    elif 99 < H <= 124 and S > 34:
                        blue += 1

                    if 0 < H < 180 and 0 < S < 255 and 0 < V < 46:
                        black += 1
                    elif 0 < H < 180 and 0 < S < 43 and 221 < V < 225:
                        white += 1
            color = "no"
            # print('yellow:{:<6}green:{:<6}blue:{:<6}'.format(yellow, green, blue))

            limit1 = limit2 = 0
            if yellow * 2 >= card_img_count:
                color = "yellow"
                limit1 = 11
                limit2 = 34  # some images have a greenish color cast
            elif green * 2 >= card_img_count:
                color = "green"
                limit1 = 35
                limit2 = 99
            elif blue * 2 >= card_img_count:
                color = "blue"
                limit1 = 100
                limit2 = 124  # some images have a purplish color cast
            elif black + white >= card_img_count * 0.7:
                color = "bw"
            # print(color)
            colors.append(color)
            # print(blue, green, yellow, black, white, card_img_count)
            if limit1 == 0:
                continue

            # re-localize using the plate color, shrinking the non-plate borders
            xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1, limit2, color)
            if yl == yh and xl == xr:
                continue
            need_accurate = False
            if yl >= yh:
                yl = 0
                yh = row_num
                need_accurate = True
            if xl >= xr:
                xl = 0
                xr = col_num
                need_accurate = True
            card_imgs[card_index] = card_img[yl:yh, xl:xr] \
                if color != "green" or yl < (yh - yl) // 4 else card_img[yl - (yh - yl) // 4:yh, xl:xr]
            if need_accurate:  # x or y may not have shrunk; try once more
                card_img = card_imgs[card_index]
                card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
                xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1, limit2, color)
                if yl == yh and xl == xr:
                    continue
                if yl >= yh:
                    yl = 0
                    yh = row_num
                if xl >= xr:
                    xl = 0
                    xr = col_num
            card_imgs[card_index] = card_img[yl:yh, xl:xr] \
                if color != "green" or yl < (yh - yl) // 4 else card_img[yl - (yh - yl) // 4:yh, xl:xr]
        # cv2.imshow("result", card_imgs[0])
        # cv2.imwrite('1.jpg', card_imgs[0])
        # print('color result: ' + colors[0])
        return card_imgs, colors
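
The addWeighted step in __preTreatment computes the image minus its opening, which is exactly a morphological top-hat, so OpenCV can do it in one call. A sketch of the equivalence (the input file name is a placeholder):

import cv2
import numpy as np

gray = cv2.imread('plate.jpg', cv2.IMREAD_GRAYSCALE)  # placeholder input
kernel = np.ones((20, 20), np.uint8)
opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
manual = cv2.addWeighted(gray, 1, opening, -1, 0)          # gray - opening
tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, kernel)  # the same result in one call
assert np.array_equal(manual, tophat)  # holds because an opening never exceeds the source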
Esempio n. 56
0
def idcard_region():

    idcard_img = "D:/abner/project/dataset/idcard/VOC2007/JPEGImages/20.jpg"
    img = cv2.imread(idcard_img)
    #resize
    img = cv2.resize(img, (600, 449))
    # convert to HSV
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # H channel
    img_h = hsv_img[..., 0]

    img_h_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_h_hist = calcAndDrawHist(img_h_gray, [255, 0, 0])
    # Gaussian blur
    img_h_gray = cv2.GaussianBlur(img_h_gray, (3, 3), 0)

    # min and max pixel values of the image
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(img_h_gray)

    # binarization
    # rec2, thresh2 = cv2.threshold(img_h_gray, 10, 255, cv2.THRESH_BINARY)
    # print("val th1 ", rec2)
    # Otsu's method
    rec2, thresh2 = cv2.threshold(img_h_gray, 50, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    print("val th ", rec2)
    # adaptive threshold
    # adp_thresh = cv2.adaptiveThreshold(img_h_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
    #                            cv2.THRESH_BINARY, 11, 2)
    # Gaussian blur
    # adp_thresh2 = cv2.GaussianBlur(adp_thresh, (3, 3), 0)
    # invert the binary image, flipping every pixel
    result = cv2.bitwise_not(thresh2)
    cv2.imshow("thresh2 ", result)
    # gauss_img  = cv2.GaussianBlur(result, (3, 3), 0)
    # cv2.imshow("gauss img ",gauss_img)

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    eroded = cv2.erode(result, kernel)  # erode the image
    dilated = cv2.dilate(eroded, kernel)  # dilate the image

    # cv2.imshow("erode",eroded)

    # X Gradient
    xgrad = cv2.Sobel(img_h_gray, cv2.CV_16SC1, 1, 0)  # compute the gradient (the API does not accept floating point here)
    # Y Gradient
    ygrad = cv2.Sobel(img_h_gray, cv2.CV_16SC1, 0, 1)
    # grad_x = cv2.Scharr(img_h_gray, cv2.CV_16SC1, 1, 0)
    # grad_y = cv2.Scharr(img_h_gray, cv2.CV_16SC1, 0, 1)
    # edge detection
    # use the min/max pixel values as the low/high thresholds
    threshold_val = (maxVal + minVal) / 2
    threshold_low = maxVal * 0.5
    # two options: Canny(img, thresh1, thresh2), or gradient-based Canny(xgrad, ygrad, thresh1, thresh2)
    edge_output = cv2.Canny(xgrad, ygrad, threshold_val,
                            threshold_low)  # call cv2.Canny with the high/low thresholds to extract the edges
    cv2.imshow("edge ", edge_output)
    # median filter
    img_medianBlur = cv2.medianBlur(edge_output, 3)  # median filter
    cv2.imshow("midd result", img_medianBlur)  # show the median-filter result
    gauss_img = cv2.GaussianBlur(edge_output, (5, 5), 0)
    cv2.imshow("gauss img ", gauss_img)

    # erode after edge extraction

    eroded_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    eroded = cv2.erode(gauss_img.copy(), eroded_kernel)  # erode the image
    cv2.imshow("eroded ", eroded)

    dilated_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilated = cv2.dilate(img_medianBlur, dilated_kernel)  # dilate the image
    cv2.imshow("dilated ", dilated)

    # closing
    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
    closing_img = cv2.morphologyEx(dilated.copy(), cv2.MORPH_CLOSE, kernel3)
    cv2.imshow("closing ", closing_img)
    # opening
    kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # open_img = cv2.morphologyEx(closing_img.copy(), cv2.MORPH_OPEN, kernel2)
    # cv2.imshow("opening ", open_img)

    src_img = find_contours(img, closing_img)
    cv2.imshow("count ", src_img)

    cv2.waitKey(0)
Esempio n. 57
0
def closing(img, kernel_size=5):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
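
A usage sketch for the helper above (the input file name is a placeholder):

import cv2

mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # placeholder binary mask
filled = closing(mask)  # default 5x5 kernel
filled_big = closing(mask, kernel_size=9)  # a larger kernel bridges wider gaps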
Esempio n. 58
0
def game_of_draw_letters(color_point, color_pen, level):
    level = int(level)
    inf_ob = Interface('gameofdrawletters')
    max_level = inf_ob.getMaxLevel()
    if level > max_level:
        temp = np.ones((480, 640, 3), dtype=np.uint8)
        temp *= 255
        cv2.putText(temp, "You have completed all levels of this game",
                    (50, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (
                        0,
                        255,
                        0,
                    ), 2, cv2.LINE_AA)
        cv2.imshow('Message', temp)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
        return

    blueLower1 = None
    blueUpper1 = None
    blueLower2 = None
    blueUpper2 = None
    redLower1 = None
    redUpper1 = None
    redLower2 = None
    redUpper2 = None

    if color_pen == 'Blue':
        blueLower1 = np.array([100, 60, 60])
        blueUpper1 = np.array([140, 255, 255])
        blueLower2 = np.array([100, 60, 60])
        blueUpper2 = np.array([140, 255, 255])

    elif color_pen == 'Red':
        blueLower1 = np.array([170, 70, 50])
        blueUpper1 = np.array([180, 255, 255])
        blueLower2 = np.array([0, 70, 50])
        blueUpper2 = np.array([10, 255, 255])

    else:  #[40,100,50],[75,255,255]
        blueLower1 = np.array([36, 25, 25])
        blueUpper1 = np.array([70, 255, 255])
        blueLower2 = np.array([36, 25, 25])
        blueUpper2 = np.array([70, 255, 255])

    if color_point == 'Blue':
        redLower1 = np.array([100, 60, 60])
        redUpper1 = np.array([140, 255, 255])
        redLower2 = np.array([100, 60, 60])
        redUpper2 = np.array([140, 255, 255])

    elif color_point == 'Red':
        redLower1 = np.array([170, 70, 50])
        redUpper1 = np.array([180, 255, 255])
        redLower2 = np.array([0, 70, 50])
        redUpper2 = np.array([10, 255, 255])

    else:  #[40,100,50],[75,255,255]
        redLower1 = np.array([36, 25, 25])
        redUpper1 = np.array([70, 255, 255])
        redLower2 = np.array([36, 25, 25])
        redUpper2 = np.array([70, 255, 255])

    # Load the models built in the previous steps
    cnn_model = load_model('emnist_cnn_model.h5')

    # Letters lookup
    letters = {
        1: 'a',
        2: 'b',
        3: 'c',
        4: 'd',
        5: 'e',
        6: 'f',
        7: 'g',
        8: 'h',
        9: 'i',
        10: 'j',
        11: 'k',
        12: 'l',
        13: 'm',
        14: 'n',
        15: 'o',
        16: 'p',
        17: 'q',
        18: 'r',
        19: 's',
        20: 't',
        21: 'u',
        22: 'v',
        23: 'w',
        24: 'x',
        25: 'y',
        26: 'z',
        27: '-'
    }

    # Contour-area bounds for a blob to count as the pen tip
    min_area = 1200
    max_area = 10000

    # Define a 5x5 kernel for erosion and dilation
    kernel = np.ones((5, 5), np.uint8)

    # Define Black Board
    blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
    alphabet = np.zeros((200, 200, 3), dtype=np.uint8)

    # Setup deques to store alphabet drawn on screen
    points = deque(maxlen=512)

    # Define prediction variables

    prediction = 26

    def getContour(cnts):
        cnt = None
        cur_area = 0
        for ct in cnts:
            area = float(cv2.contourArea(ct))
            if area >= min_area and area <= max_area and area > cur_area:
                cur_area = area
                cnt = ct
        return cnt

    camera = cv2.VideoCapture(0)
    br = True
    while level <= max_level and br:
        cv2.namedWindow('Draw this!', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Draw this!', 640, 480)
        temp = np.ones((480, 640, 3), dtype=np.uint8)
        temp *= 255
        cv2.putText(temp, "Level" + str(level), (200, 220),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (
                        0,
                        255,
                        0,
                    ), 2, cv2.LINE_AA)
        cv2.imshow('Draw this!', temp)
        cv2.waitKey(3000)

        d_char = letters[random.randrange(1, 27, 1)]

        d = cv2.imread('images/DrawGame/draw_this/draw_' + d_char + '.PNG')
        prob_dist = []
        trials = 0
        thresh_t = 5
        crs = 0
        count = 0
        while True:
            # Grab the current paintWindow
            try:
                if level == 1: cv2.imshow('Draw this!', d)
                elif level == 2:
                    t = np.ones((480, 640, 3), dtype=np.uint8)
                    t *= 255
                    cv2.putText(t, "Listen to the sound and draw letter.",
                                (70, 220), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (
                                    0,
                                    255,
                                    0,
                                ), 2, cv2.LINE_AA)
                    cv2.imshow('Draw this!', t)
                    if count == 0:
                        cv2.waitKey(1000)
                        filename = 'audios/' + d_char.upper() + '.wav'
                        playsound(filename)
                        count = 500

                (grabbed, frame) = camera.read()
                if not grabbed:
                    continue
                frame = cv2.flip(frame, 1)
                hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # Determine which pixels fall within the blue boundaries and then blur the binary image
                blueMask1 = cv2.inRange(hsv, blueLower1, blueUpper1)
                blueMask2 = cv2.inRange(hsv, blueLower2, blueUpper2)
                blueMask = blueMask1 | blueMask2
                blueMask = cv2.erode(blueMask, kernel, iterations=2)
                blueMask = cv2.morphologyEx(blueMask, cv2.MORPH_OPEN, kernel)
                blueMask = cv2.dilate(blueMask, kernel, iterations=1)

                redMask1 = cv2.inRange(hsv, redLower1, redUpper1)
                redMask2 = cv2.inRange(hsv, redLower2, redUpper2)
                redMask = redMask1 | redMask2
                redMask = cv2.erode(redMask, kernel, iterations=2)
                redMask = cv2.morphologyEx(redMask, cv2.MORPH_OPEN, kernel)
                redMask = cv2.dilate(redMask, kernel, iterations=1)

                # Find contours (bottle cap in my case) in the image
                (cnts_blue, _) = cv2.findContours(blueMask.copy(),
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
                center = None
                (cnts_red, _) = cv2.findContours(redMask.copy(),
                                                 cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)

                cnt = getContour(cnts_blue)
                if cnt is not None:
                    ((x, y), radius) = cv2.minEnclosingCircle(cnt)
                    # Draw the circle around the contour
                    cv2.circle(frame, (int(x), int(y)), int(radius),
                               (0, 255, 255), 2)
                    M = cv2.moments(cnt)
                    center = (int(M['m10'] / M['m00']),
                              int(M['m01'] / M['m00']))
                    points.appendleft(center)

                else:
                    if len(points) != 0:
                        blackboard_gray = cv2.cvtColor(blackboard,
                                                       cv2.COLOR_BGR2GRAY)
                        blur1 = cv2.medianBlur(blackboard_gray, 15)
                        blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
                        thresh1 = cv2.threshold(
                            blur1, 0, 255,
                            cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
                        blackboard_cnts = cv2.findContours(
                            thresh1.copy(), cv2.RETR_TREE,
                            cv2.CHAIN_APPROX_NONE)[0]
                        if len(blackboard_cnts) >= 1:
                            cnt = sorted(blackboard_cnts,
                                         key=cv2.contourArea,
                                         reverse=True)[0]

                            if cv2.contourArea(cnt) > 1000:
                                x, y, w, h = cv2.boundingRect(cnt)
                                alphabet = blackboard_gray[y - 10:y + h + 10,
                                                           x - 10:x + w + 10]
                                newImage = cv2.resize(alphabet, (28, 28))
                                newImage = np.array(newImage)
                                newImage = newImage.astype('float32') / 255

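                                # reshape to (batch=1, 28, 28, channels=1) and
                                # predict; cnn_model is presumably a Keras-style
                                # model trained on 28x28 letter images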
                                prediction = cnn_model.predict(
                                    newImage.reshape(1, 28, 28, 1))[0]
                                prob_dist = prediction
                                prediction = np.argmax(prediction)
                                #trials=trials-1

                        # Empty the points deque and the blackboard
                        points = deque(maxlen=512)
                        blackboard = np.zeros((480, 640, 3), dtype=np.uint8)

                    if len(cnts_red) > 0:
                        cnt = getContour(cnts_red)
                        if cnt is not None:
                            # Get the radius of the enclosing circle around the found contour
                            ((x, y), radius) = cv2.minEnclosingCircle(cnt)
                            # Draw the circle around the contour
                            center = (int(x), int(y))
                            cv2.circle(frame, center, int(radius),
                                       (0, 255, 255), 2)
                            cv2.circle(blackboard, center, 5, (0, 255, 0), -1)

                # Connect the points with a line

                for i in range(1, len(points)):
                    if points[i - 1] is None or points[i] is None:
                        continue
                    cv2.line(blackboard, points[i - 1], points[i],
                             (255, 255, 255), 8)

                # Look up the character for the current prediction
                predicted_char = str(letters[int(prediction) + 1])
                if predicted_char == d_char:
                    rating = ''
                    if prob_dist[prediction] >= 0.95: rating = 'Bravo'
                    elif prob_dist[prediction] >= 0.85: rating = 'Excellent'
                    elif prob_dist[prediction] >= 0.75: rating = 'Very_good'
                    elif prob_dist[prediction] >= 0.50: rating = 'Good'
                    else: rating = 'Fair'
                    if prob_dist[prediction] >= 0.85: crs = crs + 1
                    filename = 'audios/' + rating + '.wav'
                    wave_obj = sa.WaveObject.from_wave_file(filename)
                    play_obj = wave_obj.play()
                    play_obj.wait_done()
                    cv2.waitKey(33)
                    cv2.namedWindow('Result', cv2.WINDOW_NORMAL)
                    cv2.resizeWindow('Result', 640, 480)
                    x = cv2.imread('images/DrawGame/objects/object_' + d_char +
                                   '.jpg')
                    cv2.imshow('Result', x)
                    cv2.waitKey(35)
                    filename = 'audios/audio_' + d_char + '.wav'
                    wave_obj = sa.WaveObject.from_wave_file(filename)
                    play_obj = wave_obj.play()
                    play_obj.wait_done()  # Wait until sound has finished playing
                    cv2.waitKey(0)
                    prediction = 26
                    cv2.destroyWindow('Result')
                    cv2.waitKey(33)
                    d_char = letters[random.randrange(1, 27, 1)]
                    d = cv2.imread('images/DrawGame/draw_this/draw_' + d_char +
                                   '.PNG')
                    count = 0
                    thresh_t = thresh_t - 1
                    trials = trials + 1

                elif predicted_char != '-':
                    filename = 'audios/try_again.wav'
                    wave_obj = sa.WaveObject.from_wave_file(filename)
                    play_obj = wave_obj.play()
                    play_obj.wait_done()  # Wait until sound has finished playing
                    prediction = 26
                    trials = trials + 1
                    points = deque(maxlen=512)
                    blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
                    count = 0

                # Show the frame
                cv2.imshow("Board", blackboard)

                if len(points) == 0:
                    blackboard = np.zeros((480, 640, 3), dtype=np.uint8)

                if thresh_t == 0:
                    score = float(crs) / trials
                    if score >= 0.7:
                        inf_ob.update_scores(level, score)
                        temp = np.ones((480, 640, 3), dtype=np.uint8)
                        temp *= 255
                        if level < max_level:
                            cv2.putText(temp, "Congrats, Level UP!!",
                                        (200, 220), cv2.FONT_HERSHEY_SIMPLEX,
                                        1, (
                                            0,
                                            255,
                                            0,
                                        ), 2, cv2.LINE_AA)
                        else:
                            cv2.putText(temp, "Congrats, all levels completed",
                                        (90, 220), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (
                                            0,
                                            255,
                                            0,
                                        ), 2, cv2.LINE_AA)
                        cv2.imshow('Draw this!', temp)
                        cv2.waitKey(5000)
                        level = level + 1
                        break
                    else:
                        temp = np.ones((480, 640, 3), dtype=np.uint8)
                        temp *= 255
                        cv2.putText(temp,
                                    "Try again Level " + str(level) + "!!",
                                    (200, 220), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (0, 255, 0), 2, cv2.LINE_AA)
                        cv2.imshow('Draw this!', temp)
                        cv2.waitKey(5000)
                        break
                if (level == 2 and count > 0): count = count - 1
            except Exception as e:
                print('exception occurred:', e)

            # If the 'q' key is pressed, stop the loop
            if cv2.waitKey(1) & 0xFF == ord("q"):
                br = False
                break

    # Cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()
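
Note: the letters lookup is defined earlier in the source and not shown here.
From the indices used above (keys 1..26 for the drawable characters, and the
reset value prediction = 26 mapping to the placeholder '-'), a minimal sketch
consistent with this example would be:

letters = {i: chr(ord('a') + i - 1) for i in range(1, 27)}  # 1 -> 'a', ..., 26 -> 'z'
letters[27] = '-'  # placeholder shown while nothing has been recognized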
Esempio n. 59
0
if __name__ == "__main__":

    #    img=cv2.imread('test_images/coins1.jpg',3)
    #    img=cv2.imread("D:/projects/regiongrow/regiongrow/Images/error/3-1.png",3)
    img = cv2.imread('test_images/7-4.png', 3)
    binary = cv2.imread('test_images/luquan2.png', 0)
    h, w, c = img.shape
    show_img("img", img, 0)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    show_img("thresh", thresh, 0)

    s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    morph_img = cv2.morphologyEx(binary, cv2.MORPH_OPEN, s, iterations=1)
    #    show_img('morph_img',morph_img,0)
    morph_img = cv2.morphologyEx(morph_img, cv2.MORPH_CLOSE, s, iterations=1)
    show_img('morph_img', morph_img, 0)

    sure_bg = cv2.dilate(morph_img, s, iterations=3)
    show_img('sure_bg', sure_bg, 0)

    dist_transform = cv2.distanceTransform(morph_img, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.3 * dist_transform.max(),
                                 255, 0)
    # The normalize call is cut off in the source; the standard arguments for
    # scaling the distance transform into 0..1 are assumed here.
    cv2.normalize(dist_transform, dist_transform, 0, 1.0, cv2.NORM_MINMAX)
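    # The rest of this example is missing. A minimal sketch of the remaining
    # steps of the standard OpenCV watershed recipe, assuming the variables
    # above (sure_bg, sure_fg, img); this completion is an assumption, not
    # part of the original example:
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)  # neither sure bg nor sure fg

    # label the sure-foreground blobs, shift so the background is 1, and mark
    # the unknown region as 0 for watershed
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    img[markers == -1] = (0, 0, 255)  # paint watershed boundaries red
    show_img('watershed', img, 0)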
def Number_Recognition():
    img_ori = cv2.imread(
        '/home/pi/OpenCV/licence_plate_recognition/capturetest2.jpg')

    height, width, channel = img_ori.shape  # image size

    plt.figure(figsize=(12, 10))
    plt.imshow(img_ori, cmap='gray')
    plt.show()
    #plt.savefig('./fig1.png')

    gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)  # grayscale is easier to process

    plt.figure(figsize=(12, 10))
    plt.imshow(gray, cmap='gray')
    plt.show()

    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

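    # Top-hat extracts bright details smaller than the structuring element and
    # black-hat extracts dark ones; adding the top-hat and subtracting the
    # black-hat (below) boosts the local contrast of the grayscale image.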
    imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT,
                                   structuringElement)

    imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
    gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

    # After the grayscale conversion, adaptive thresholding splits the image
    # into black and white: each pixel is set to 0 below a locally computed
    # threshold and to 255 above it. A Gaussian blur first reduces noise.
    img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)

    img_thresh = cv2.adaptiveThreshold(
        img_blurred,
        maxValue=255.0,
        adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY_INV,
        blockSize=19,
        C=9)

    plt.figure(figsize=(12, 10))
    plt.imshow(img_thresh, cmap='gray')
    plt.show()

    # Contours are outlines: findContours on the thresholded image, followed
    # by drawContours, traces the outlines it contains.
    _, contours, _ = cv2.findContours(img_thresh,
                                      mode=cv2.RETR_LIST,
                                      method=cv2.CHAIN_APPROX_SIMPLE)

    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    cv2.drawContours(temp_result,
                     contours=contours,
                     contourIdx=-1,
                     color=(255, 255, 255))

    plt.figure(figsize=(12, 10))
    plt.imshow(temp_result)
    plt.show()

    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    # contours_dict stores information about each contour
    contours_dict = []

    # boundingRect gives the rectangle enclosing each contour
    # (used here to draw the rectangles onto the image)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(temp_result,
                      pt1=(x, y),
                      pt2=(x + w, y + h),
                      color=(255, 255, 255),
                      thickness=2)

        # insert to dict
        contours_dict.append({
            'contour': contour,  # the contour itself
            'x': x,
            'y': y,
            'w': w,
            'h': h,
            'cx': x + (w / 2),  # center of the bounding rectangle
            'cy': y + (h / 2)
        })

    plt.figure(figsize=(12, 10))
    plt.imshow(temp_result, cmap='gray')
    plt.show()

    # minimum area, width, height and allowed aspect ratio for a bounding
    # rectangle to count as a character candidate
    MIN_AREA = 80
    MIN_WIDTH, MIN_HEIGHT = 2, 8
    MIN_RATIO, MAX_RATIO = 0.25, 1.0

    # every rectangle that passes the checks is collected in this list
    possible_contours = []

    cnt = 0
    # walk contours_dict, computing each rectangle's area and width-to-height ratio
    for d in contours_dict:
        area = d['w'] * d['h']
        ratio = d['w'] / d['h']

        # rectangles satisfying all the conditions are likely plate
        # characters, so append them to possible_contours
        if area > MIN_AREA \
        and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT \
        and MIN_RATIO < ratio < MAX_RATIO:
            d['idx'] = cnt  # tag each contour with an index
            cnt += 1
            possible_contours.append(d)

    # visualize possible contours
    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    for d in possible_contours:
        cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
        cv2.rectangle(temp_result,
                      pt1=(d['x'], d['y']),
                      pt2=(d['x'] + d['w'], d['y'] + d['h']),
                      color=(255, 255, 255),
                      thickness=2)

    plt.figure(figsize=(12, 10))
    plt.imshow(temp_result, cmap='gray')
    plt.show()

    # After the steps above there are many candidate rectangles. Those that
    # belong to the plate will be arranged in a row with consistent spacing
    # and size, and these properties let us test whether a rectangle is part
    # of the plate.

    MAX_DIAG_MULTIPLYER = 5  # 5
    MAX_ANGLE_DIFF = 18.0  # was 12.0; 'Andrew2' recognized after loosening angle 12 -> 15, area 0.5 -> 0.3
    MAX_AREA_DIFF = 0.4  # 0.5
    MAX_WIDTH_DIFF = 0.3  #0.5
    MAX_HEIGHT_DIFF = 0.2  #0.2
    MIN_N_MATCHED = 3  # 3

    # used recursively below
    def find_chars(contour_list):
        matched_result_idx = []  # stores the final index lists

        # compare contour d1 against every other contour d2; identical
        # contours need no comparison, so skip them
        for d1 in contour_list:
            matched_contours_idx = []
            for d2 in contour_list:
                if d1['idx'] == d2['idx']:
                    continue

                dx = abs(d1['cx'] - d2['cx'])  # cx/cy: center of the bounding rectangle
                dy = abs(d1['cy'] - d2['cy'])
                # diagonal of d1's box (the hypotenuse c of a right triangle)
                diagonal_length1 = np.sqrt(d1['w']**2 + d1['h']**2)
                # distance between the two center points
                distance = np.linalg.norm(
                    np.array([d1['cx'], d1['cy']]) -
                    np.array([d2['cx'], d2['cy']]))
                if dx == 0:
                    angle_diff = 90
                else:
                    # angle between the two contours via arctangent
                    angle_diff = np.degrees(np.arctan(dy / dx))
                area_diff = abs(d1['w'] * d1['h'] - d2['w'] * d2['h']) / (
                    d1['w'] * d1['h'])  # area ratio
                width_diff = abs(d1['w'] - d2['w']) / d1['w']  # width ratio
                height_diff = abs(d1['h'] - d2['h']) / d1['h']  # height ratio

                # if the distance, angle, area, width and height differences
                # are all within bounds, keep d2 as a match
                if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER \
                and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
                and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
                    matched_contours_idx.append(d2['idx'])

            # append d1 itself
            matched_contours_idx.append(d1['idx'])

            # a group with fewer than MIN_N_MATCHED members is very unlikely
            # to be a plate, so drop it
            if len(matched_contours_idx) < MIN_N_MATCHED:
                continue

            matched_result_idx.append(matched_contours_idx)  # keep the final candidates

            unmatched_contour_idx = []
            for d4 in contour_list:
                if d4['idx'] not in matched_contours_idx:
                    unmatched_contour_idx.append(d4['idx'])

            unmatched_contour = np.take(possible_contours,
                                        unmatched_contour_idx)

            # recursive
            recursive_contour_list = find_chars(unmatched_contour)

            # re-examine the contours that were not matched; whatever survives
            # the recursive search is appended as well
            for idx in recursive_contour_list:
                matched_result_idx.append(idx)

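            # stop after the first d1 that produced a group; the recursive
            # call above has already re-run the search over the leftovers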
            break

        return matched_result_idx

    result_idx = find_chars(possible_contours)

    matched_result = []
    for idx_list in result_idx:
        matched_result.append(np.take(possible_contours, idx_list))

    # visualize possible contours
    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    # draw result_idx (visualize the final candidates)
    for r in matched_result:
        for d in r:
            cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
            cv2.rectangle(temp_result,
                          pt1=(d['x'], d['y']),
                          pt2=(d['x'] + d['w'], d['y'] + d['h']),
                          color=(255, 255, 255),
                          thickness=2)

    plt.figure(figsize=(12, 10))
    plt.imshow(temp_result, cmap='gray')
    plt.show()

    PLATE_WIDTH_PADDING = 1.3  # 1.3
    PLATE_HEIGHT_PADDING = 1.5  # 1.5
    MIN_PLATE_RATIO = 3
    MAX_PLATE_RATIO = 10

    plate_imgs = []
    plate_infos = []

    # sort each matched group left to right by center x
    for i, matched_chars in enumerate(matched_result):
        sorted_chars = sorted(matched_chars, key=lambda x: x['cx'])

        # center x and y of the presumed plate
        plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2
        plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2

        plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] -
                       sorted_chars[0]['x']) * PLATE_WIDTH_PADDING

        sum_height = 0
        for d in sorted_chars:
            sum_height += d['h']

        # plate height: average character height times the padding factor
        plate_height = int(sum_height / len(sorted_chars) * PLATE_HEIGHT_PADDING)

        # To line the plate characters up horizontally, compute the tilt
        # angle of the row with arcsine.
        triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy']
        triangle_hypotenus = np.linalg.norm(
            np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -
            np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']]))

        angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))

        rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx, plate_cy),
                                                  angle=angle,
                                                  scale=1.0)
        # an affine warp straightens the tilted image
        img_rotated = cv2.warpAffine(img_thresh,
                                     M=rotation_matrix,
                                     dsize=(width, height))

        # crop just the plate region with getRectSubPix
        img_cropped = cv2.getRectSubPix(img_rotated,
                                        patchSize=(int(plate_width),
                                                   int(plate_height)),
                                        center=(int(plate_cx), int(plate_cy)))

        plate_ratio = img_cropped.shape[1] / img_cropped.shape[0]
        if plate_ratio < MIN_PLATE_RATIO or plate_ratio > MAX_PLATE_RATIO:
            continue

        plate_imgs.append(img_cropped)
        plate_infos.append({
            'x': int(plate_cx - plate_width / 2),
            'y': int(plate_cy - plate_height / 2),
            'w': int(plate_width),
            'h': int(plate_height)
        })

        plt.subplot(len(matched_result), 1, i + 1)
        plt.imshow(img_cropped, cmap='gray')
        plt.show()

    longest_idx = -1
    longest_text = 6  # a candidate string must be longer than this to be accepted

    plate_chars = []

    # threshold each plate crop once more
    for i, plate_img in enumerate(plate_imgs):
        plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
        _, plate_img = cv2.threshold(plate_img,
                                     thresh=0.0,
                                     maxval=255.0,
                                     type=cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        # find contours again on the plate crop
        _, contours, _ = cv2.findContours(plate_img,
                                          mode=cv2.RETR_LIST,
                                          method=cv2.CHAIN_APPROX_SIMPLE)

        plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]
        plate_max_x, plate_max_y = 0, 0

        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)

            area = w * h
            ratio = w / h

            if area > MIN_AREA \
            and w > MIN_WIDTH and h > MIN_HEIGHT \
            and MIN_RATIO < ratio < MAX_RATIO:
                if x < plate_min_x:
                    plate_min_x = x
                if y < plate_min_y:
                    plate_min_y = y
                if x + w > plate_max_x:
                    plate_max_x = x + w
                if y + h > plate_max_y:
                    plate_max_y = y + h
        # with the min/max x and y of the accepted character boxes, crop the
        # plate image down to just the characters
        img_result = plate_img[plate_min_y:plate_max_y,
                               plate_min_x:plate_max_x]

        # blur and threshold once more, then add padding (a border) so that
        # pytesseract recognizes the text more reliably
        img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0)
        _, img_result = cv2.threshold(img_result,
                                      thresh=0.0,
                                      maxval=255.0,
                                      type=cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        img_result = cv2.copyMakeBorder(img_result,
                                        top=10,
                                        bottom=10,
                                        left=10,
                                        right=10,
                                        borderType=cv2.BORDER_CONSTANT,
                                        value=(0, 0, 0))
        global chars
        chars = pytesseract.image_to_string(
            img_result, lang='kor',
            config='--psm 7 --oem 0')  # --oem 0 needs the legacy kor traineddata(?)
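        # --psm 7 treats the crop as a single line of text, which matches a
        # one-row licence plate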

        # char_cnt = 0

        # for r in chars:
        #     if ord('가') <= ord(r) <= ord('힣'):
        #      char_cnt+= 1
        # if char_cnt > 3 or char_cnt < 1:
        #     continue

        result_chars = ''
        has_digit = False
        for c in chars:
            # keep the character only if it is Hangul or a digit
            if ord('가') <= ord(c) <= ord('힣') or c.isdigit():
                if c.isdigit():
                    has_digit = True
                result_chars += c


        plate_chars.append(result_chars)

    char_cnt = 0
    char_flag = 0

    for i in range(0, len(plate_chars)):
        for r in plate_chars[i]:
            if ord('가') <= ord(r) <= ord('힣'):
                char_cnt += 1

        if char_cnt == 1:
            if len(plate_chars[i]) > longest_text:
                longest_idx = i
                char_flag = 1

        char_cnt = 0
        plt.subplot(len(plate_imgs), 1, 1)
        plt.imshow(img_result, cmap='gray')


    # Among the candidates, take the one with the longest string as the plate
    # we are looking for.

    # Draw the plate location and the predicted text on the photo.
    img_out = img_ori.copy()

    if char_flag == 1:
        info = plate_infos[longest_idx]
        chars = plate_chars[longest_idx]

        cv2.rectangle(img_out,
                      pt1=(info['x'], info['y']),
                      pt2=(info['x'] + info['w'], info['y'] + info['h']),
                      color=(255, 0, 0),
                      thickness=2)

        cv2.imwrite(chars + '.jpg', img_out)

        plt.figure(figsize=(12, 10))
        plt.imshow(img_out)

    elif char_flag == 0:
        chars = '미인식 차량'  # "unrecognized vehicle" (also used as the file name)
        cv2.imwrite(chars + '.jpg', img_out)
    else:
        chars = '미확인 물체'  # "unidentified object"
        cv2.imwrite(chars + '.jpg', img_out)
    print(chars)

    plt.figure(figsize=(12, 10))
    plt.imshow(img_out)
    plt.show()

    MQTT_PATH = "common"
    MQTT_PATH2 = "Image"
    MQTT_SERVER = '210.119.12.77'

    now = time.localtime()
    curr_time = "%04d년 %02d월 %02d일 %02d시 %02d분 %02d초" % (
        now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min,
        now.tm_sec)

    def on_connect(client, userdata, flags, rc):
        print("Coneection OK")

    client = mqtt.Client()
    client.on_connect = on_connect
    client.connect(MQTT_SERVER, 1883)
    client.loop_start()

    client.publish(
        MQTT_PATH,
        json.dumps({'Entering Time': curr_time, 'Licence Number': chars},
                   ensure_ascii=False))

    client.loop_stop()

    client.disconnect()

    with open(chars + '.jpg', "rb") as f:
        byteArr = bytearray(f.read())

    publish.single(MQTT_PATH2, byteArr, hostname=MQTT_SERVER)

    f = open("log.txt", 'a', encoding="UTF8")
    f.write(" Licence Number: " + chars + "  Entering Time: " + curr_time +
            "\n")
    f.close()
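
For completeness, a minimal subscriber sketch that would receive the record
published above, assuming paho-mqtt and the same broker and topic constants
(the handler name on_plate is hypothetical):

import json
import paho.mqtt.client as mqtt

MQTT_SERVER = '210.119.12.77'
MQTT_PATH = 'common'

def on_plate(client, userdata, msg):
    # decode the JSON record published by Number_Recognition()
    record = json.loads(msg.payload.decode('utf-8'))
    print(record['Licence Number'], record['Entering Time'])

client = mqtt.Client()
client.on_message = on_plate
client.connect(MQTT_SERVER, 1883)
client.subscribe(MQTT_PATH)
client.loop_forever()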