Example #1
    def update(self, frame, pos=None, rate=0.125):
        # Crop template image from last position
        (x, y), (w, h) = (self.pos if pos is None else pos), self.size
        self.last_img = img = cv2.getRectSubPix(frame, (w, h), (x, y))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = self.preprocess(img)

        # Correlate, find position of object
        self.last_resp, (dx, dy), self.psr = self.correlate(img)

        # Break if lost tracking (don't update filter)
        self.good = self.psr > 8.0
        if not self.good:
            return

        # Cut out new image based on tracked location
        self.pos = x+dx, y+dy
        self.last_img = img = cv2.getRectSubPix(frame, (w, h), self.pos)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Preprocess, get DFT
        img = self.preprocess(img)
        A = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)

        H1 = cv2.mulSpectrums(self.G, A, 0, conjB=True)  # G x F*
        H2 = cv2.mulSpectrums(     A, A, 0, conjB=True)  # F x F*

        # Get weighted average based on the rate (using the new image)
        self.H1 = self.H1 * (1.0-rate) + H1 * rate
        self.H2 = self.H2 * (1.0-rate) + H2 * rate

        # Update filter
        self.update_kernel()
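The correlate() helper used above is not shown here. The sketch below follows the OpenCV MOSSE sample (the original class may differ): it applies the learned filter H in the frequency domain and computes the peak-to-sidelobe ratio (PSR) that the self.psr > 8.0 check relies on.

    def correlate(self, img):
        # filter the patch in the frequency domain with the current kernel H
        C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT),
                             self.H, 0, conjB=True)
        resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        h, w = resp.shape
        _, mval, _, (mx, my) = cv2.minMaxLoc(resp)
        # PSR: peak strength relative to the sidelobe statistics
        side_resp = resp.copy()
        cv2.rectangle(side_resp, (mx - 5, my - 5), (mx + 5, my + 5), 0, -1)
        smean, sstd = side_resp.mean(), side_resp.std()
        psr = (mval - smean) / (sstd + 1e-5)
        return resp, (mx - w // 2, my - h // 2), psr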
Example #2
def splitImage(edges, blur, tag="", margin=4):
    h_projection = hprojection(edges)
    v_projection = vprojection(edges)
    top, bottom = cropProjection(h_projection)
    left, right = cropProjection(v_projection)

    # plt.imshow(edges,cmap = 'gray')
    # plt.plot(range(0, len(vprojection)), vprojection, 'r')
    # plt.plot(hprojection, range(0, len(hprojection)), 'b')
    # plt.show()
    regions = splitProjection(v_projection, left, right)
    print tag, left, right, top, bottom
    # print regions
    # print v_projection[1270:1450]
    if len(tag) == 0:
        return regions, left, right, top, bottom
    for region in regions:
        left, leftEnd = region
        if (leftEnd - left) > 220 and (leftEnd - left) < 300:
            width = (leftEnd - left) / 2
            cr_img = cv2.getRectSubPix(
                blur, (width + margin, bottom - top + margin), (width / 2 + left, (top + bottom) / 2)
            )
            cv2.imwrite("crop%s-%d.png" % (tag, left), cr_img)
            cr_img = cv2.getRectSubPix(
                blur, (width + margin, bottom - top + margin), (leftEnd - (width / 2), (top + bottom) / 2)
            )
            cv2.imwrite("crop%s-%d.png" % (tag, left + width), cr_img)
        else:
            cr_img = cv2.getRectSubPix(
                blur, (leftEnd - left + margin, bottom - top + margin), ((leftEnd + left) / 2, (top + bottom) / 2)
            )
            cv2.imwrite("crop%s-%d.png" % (tag, left), cr_img)
    return regions, left, right, top, bottom
Example #3
    def update(self, frame, rate=0.125, img_override=None):
        (x, y), (w, h) = self.pos, self.size

        if img_override is None:
            self.last_img = img = cv2.getRectSubPix(frame, (w, h), (x, y))
        else:
            self.last_img = img = img_override

        img = self.preprocess(img)

        self.last_resp, (dx, dy), self.psr = self.correlate(img)
        self.good = self.psr > self.MIN_PSR

        if not self.good:
            return

        self.pos = x + dx, y + dy
        self.last_img = img = cv2.getRectSubPix(frame, (w, h), self.pos)
        img = self.preprocess(img)

        A = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
        H1 = cv2.mulSpectrums(self.G, A, 0, conjB=True)
        H2 = cv2.mulSpectrums(A, A, 0, conjB=True)

        self.H1 = self.H1 * (1.0 - rate) + H1 * rate
        self.H2 = self.H2 * (1.0 - rate) + H2 * rate

        self.update_kernel()
Example #4
    def crop(self, rect):
        sourceImage = self.getOriginal()
        image = sourceImage.getOpenCV()
        box = cv2.boxPoints(rect)
        box = np.int0(box)

        W = rect[1][0]
        H = rect[1][1]

        Xs = [i[0] for i in box]
        Ys = [i[1] for i in box]
        x1 = min(Xs)
        x2 = max(Xs)
        y1 = min(Ys)
        y2 = max(Ys)

        angle = rect[2]
        # Center of rectangle in source image
        center = ((x1 + x2) / 2, (y1 + y2) / 2)
        # Size of the upright rectangle bounding the rotated rectangle
        size = (x2 - x1, y2 - y1)
        M = cv2.getRotationMatrix2D((size[0] / 2, size[1] / 2), angle, 1.0)
        # Cropped upright rectangle
        cropped = cv2.getRectSubPix(image, size, center)
        cropped = cv2.warpAffine(cropped, M, size)

        croppedRotated = cv2.getRectSubPix(cropped, (int(W), int(H)), (size[0] / 2, size[1] / 2))
        return croppedRotated
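As a usage sketch, rect can come from cv2.minAreaRect in ((cx, cy), (w, h), angle) form; obj here stands for a hypothetical instance of the surrounding class:

rect = ((120.0, 85.0), (60.0, 30.0), -15.0)  # hypothetical rotated rect
patch = obj.crop(rect)                       # upright 60x30 crop of the source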
Example #5
def normCrossCorrelation(img1, img2, pt0, pt1, status, winsize, method=cv2.cv.CV_TM_CCOEFF_NORMED):
    """
    **SUMMARY**
    
    Calculates normalized cross correlation for every point.
    
    **PARAMETERS**
    
    img1 - Image 1.
    img2 - Image 2.
    pt0 - vector of points of img1
    pt1 - vector of points of img2
    status - Switch which point pairs should be calculated.
             if status[i] == 1 => match[i] is calculated.
             else match[i] = 0.0
    winsize- Size of quadratic area around the point
             which is compared.
    method - Specifies the way how image regions are compared. see cv2.matchTemplate
    
    **RETURNS**
    
    match - Output: Array will contain ncc values.
            0.0 if not calculated.
 
    """
    nPts = len(pt0)
    match = np.zeros(nPts)
    for i in np.argwhere(status):
        i = i[0]
        patch1 = cv2.getRectSubPix(img1,(winsize,winsize),tuple(pt0[i]))
        patch2 = cv2.getRectSubPix(img2,(winsize,winsize),tuple(pt1[i]))
        match[i] = cv2.matchTemplate(patch1,patch2,method)
    return match
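A minimal usage sketch, with hypothetical file names and point coordinates (the default method constant comes from the legacy cv2.cv module, so OpenCV 2.x is assumed):

import cv2
import numpy as np

img1 = cv2.imread('frame0.png', 0)  # hypothetical consecutive frames
img2 = cv2.imread('frame1.png', 0)
pt0 = np.float32([[40.5, 32.0], [80.0, 64.0]])  # points tracked in img1
pt1 = np.float32([[41.0, 33.0], [81.0, 64.5]])  # their candidates in img2
status = np.ones(len(pt0), np.uint8)            # request NCC for every pair
match = normCrossCorrelation(img1, img2, pt0, pt1, status, winsize=10)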
Example #6
    def removeBackground(self,img,x,y,w,h,postit):   
        mask = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
        cv2.drawContours( mask, [postit], -1, (255,255,255),-1)

        img2 = cv2.getRectSubPix(img, (w, h), (x+w/2, y+h/2))
        postitMask = cv2.getRectSubPix(mask, (w, h), (x+w/2, y+h/2)) 
        img2channels = cv2.split(img2)
        postitMaskChannels = cv2.split(postitMask)
        
        img2channels.append(postitMaskChannels.pop())
        
        img2 = cv2.merge(img2channels)

        return img2
Example #7
    def normCrossCorrelation(self, img1, img2, points1, points2):
        i = 0
        chelper = CHelper()
        while i < len(self.points):
            if self.status[i] == 1:
                rec0 = cv2.getRectSubPix(img1, (10, 10), (points1[i][0][0], points1[i][0][1]))
                rec1 = cv2.getRectSubPix(img2, (10, 10), (points2[i][0][0], points2[i][0][1]))
                res = cv2.matchTemplate(rec0, rec1, cv2.TM_CCOEFF_NORMED)
                buf = np.getbuffer(res)
                io = StringIO.StringIO(buf)
                testnum = chelper.getHelpNumber(io.getvalue())
                self.err[i] = testnum
            else:
                self.err[i] = 0.0
            i += 1
Example #8
def corner_featureVector(corners, image, block_size):
    if DEBUG == 1:
        print sys._getframe().f_code.co_name

    # convert image1 to gray
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)

    win_w = block_size * 2 + 1
    win_h = block_size * 2 + 1

    # column means: -180 -135 -90 -45 0 45 90 135 180
    # row means: corner

    degree_histogram = np.zeros((len(corners), 9, 1), dtype=np.uint8)

    for i in range(0, len(corners)):
        im = cv2.getRectSubPix(gray, (win_w, win_h), (corners[i][0], corners[i][1]))

        # 1st derivative of image
        dx = cv2.Sobel(im, cv2.CV_64F, 1, 0, ksize=5)
        dy = cv2.Sobel(im, cv2.CV_64F, 0, 1, ksize=5)

        angle = np.arctan2(dy, dx)

        # range from -180 to 180
        # -180 <= angle < -135
        # -135 <= angle < -90
        # -90 <= angle < -45
        # ''''''

        for j in range(0, 9):
            degree_histogram[i][j] = (((math.pi / 4 * (j - 4)) <= angle) & (angle < (math.pi / 4 * (j - 3)))).sum()

    return degree_histogram
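A hypothetical call pairing this with Shi-Tomasi corners (the detector and its parameters are illustrative, not part of the original module):

image = cv2.imread('scene.png')                     # hypothetical input
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
pts = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)  # up to 100 corners
hist = corner_featureVector(pts.reshape(-1, 2), image, block_size=8)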
Example #9
def extract_features_from_img(img, kmeans, patch_size=7):
    k = kmeans.get_params()['n_clusters']
    h,w,c = img.shape
    offset = patch_size/2
    vectors = []
    indices = []
    for y in xrange(offset, h-offset):
        for x in xrange(offset, w-offset):
            patch = cv2.getRectSubPix(img, (patch_size, patch_size), (x,y))
            vector = vectorizer.extract_vector_from_patch(patch)
            vectors.append(vector)
            idx = (2*(2*y/h)+(2*x/w))*k
            indices.append(idx)

    feature = [0 for _ in xrange(4*k)]
    distances = kmeans.transform(vectors)
    for dist, idx in zip(distances, indices):
        bin = dist.argmin()
        min_dist = dist.min()
        mean = dist.mean()
        strength = max(0, mean - min_dist)
        feature[idx+bin] += strength
    feature = numpy.array(feature)
    feature /= feature.mean()
    feature /= feature.std()
    return feature
Example #10
def render(img,sqNum):
	print 'in render'
	copy = img.copy()
	
	#resize the copy
	#while (copy.shape[0]>900 or copy.shape[1]>900):
	#	copy = cv2.pyrDown(copy)

	#paint the grid
	copyY,copyX = copy.shape[:2]
	for i in range(1,sqNum-1):
		ptX=i*copyX/sqNum
		cv2.line(copy, (ptX,0), (ptX,copyY), (255,0,0), thickness=1)
		ptY=i*copyY/sqNum
		cv2.line(copy, (0,ptY), (copyX,ptY), (255,0,0), thickness=1)
	
	#get the histograms of the corresponding tiles
	l = copyX/(2*sqNum)
	h = copyY/(2*sqNum)
	squares = []
	for i in range(0,sqNum-1):
		aux = []
		for j in range(0,sqNum-1):
			center = (i*copyX/sqNum+l,j*copyY/sqNum+h)
			aux.append(getHist(cv2.getRectSubPix(img, (l,h), center))[0])
		squares.append(aux)
	return copy,squares
Example #11
def fixedPositonBySignet(srcImg):
    h, w = srcImg.shape[:2]
    img = cv2.cvtColor(srcImg, cv2.COLOR_RGB2HSV)
    img = cv2.inRange(img, np.array((90, 60, 50), dtype=np.uint8), np.array((140, 255, 255), dtype=np.uint8))
    contours0, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours0:
        if len(cnt) < 5:
            continue
        ellipse = cv2.fitEllipse(cnt)
        center, r, ang = ellipse
        # the ellipse angle is not accurate enough; use line detection to determine the skew angle
        chileIm = cv2.getRectSubPix(srcImg, (400, 100), center)
        new_img = cv2.cvtColor(chileIm, cv2.COLOR_RGB2GRAY)
        new_img = cv2.GaussianBlur(new_img, (3, 3), 0)  
        edges = cv2.Canny(new_img, 50, 150, apertureSize=3)  
        lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
        if lines is not None:
            for line in lines[0]:
                rho = line[0]    # first element is the distance rho
                theta = line[1]  # second element is the angle theta
                if (np.pi / 4.) < theta < (3. * np.pi / 4.0):
                    # intersection of this line with the first column
                    pt1 = (0, int(rho / np.sin(theta)))
                    # intersection of this line with the last column
                    pt2 = (chileIm.shape[1], int((rho - chileIm.shape[1] * np.cos(theta)) / np.sin(theta)))
                    M = (pt2[1] - pt1[1]) * 1.0 / (pt2[0] - pt1[0])
                    pi_angle = math.atan(M)
                    ang = pi_angle * 180 / np.pi
                    break
        else:
            ang = ang - 270
        if r[0] < 30 or r[1] < 50 or center[1] > h / 2:
            continue
        rate = r[1] / 175.8019256591797
        return center, rate, ang
Example #12
    def __init__(self, frame, rect):
        x1, y1, x2, y2 = rect # Extract coordinates of the rectangle to be tracked
        w, h = map(cv2.getOptimalDFTSize, [x2 - x1, y2 - y1])
        x1, y1 = (x1 + x2 - w) // 2, (y1 + y2 - h) // 2

        # centre of the region: (x1 + 0.5*(w - 1), y1 + 0.5*(h - 1))
        self.pos = x, y = x1 + 0.5 * (w - 1), y1 + 0.5 * (h - 1)
        self.size = w, h

        img = cv2.getRectSubPix(frame, (w, h), (x, y))

        # Hanning Window
        # http://en.wikipedia.org/wiki/Window_function
        # http://en.wikipedia.org/wiki/Window_function#Hann_.28Hanning.29_window
        self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)

        g = np.zeros((h, w), np.float32)
        g[h // 2, w // 2] = 1
        g = cv2.GaussianBlur(g, (-1, -1), 2.0)
        g /= g.max()

        self.G = cv2.dft(g, None, cv2.DFT_COMPLEX_OUTPUT)
        self.H1 = np.zeros_like(self.G)
        self.H2 = np.zeros_like(self.G)

        for i in xrange(128):
            a = self.preprocess(rnd_warp(img))
            A = cv2.dft(a, None, cv2.DFT_COMPLEX_OUTPUT)

            self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
            self.H2 += cv2.mulSpectrums(A, A, 0, conjB=True)

        self.update_kernel()
        self.update(frame)
        self.id = id(self)
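The update_kernel() call completes the MOSSE update by forming H = H1 / H2 elementwise in the complex domain. A sketch in the spirit of the OpenCV MOSSE sample (which wraps the complex division in a divSpec helper):

    def update_kernel(self):
        # elementwise complex division H = H1 / H2 on packed (re, im) planes
        Ar, Ai = self.H1[..., 0], self.H1[..., 1]
        Br, Bi = self.H2[..., 0], self.H2[..., 1]
        C = (Ar + 1j * Ai) / (Br + 1j * Bi)
        self.H = np.dstack([np.real(C), np.imag(C)]).copy()
        self.H[..., 1] *= -1  # store the conjugate, matching mulSpectrums usage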
Example #13
def cropImage(edges, blur):
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # borders = find_border_components(contours, edges)
    # borders.sort(key=lambda (i, x1, y1, x2, y2): (x2 - x1) * (y2 - y1))
    # count=len(borders)
    # print len(contours),count
    # for i in range(0,count):
    #    index, left,top,right,bottom =borders[i]
    #    img_crop=cv2.getRectSubPix(img, (right-left, bottom-top), ((left+right)/2, (top+bottom)/2))
    #    cv2.imwrite('1right%d' % (left) +'.png', img_crop)

    # iand = cv2.bitwise_and(img,img,mask=edges)
    # contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(edges,contours,-1,(255,255,255),-1)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 50))
    closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
    # closed = cv2.erode(closed, None, iterations = 4)
    closed = cv2.dilate(closed, None, iterations=13)
    # cv2.imwrite('closed.png',closed)
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    for i in range(0, 1):
        if len(cnts) == 0:
            break
        if i == 1 and len(cnts) == 1:
            break
        c = cnts[i]
        # compute the rotated bounding box of the largest contour
        rect = cv2.minAreaRect(c)
        box = np.int0(cv2.cv.BoxPoints(rect))
        # cv2.drawContours(img, [box], -1, (0, 255, 0), 3)
        left, top, right, bottom = caculateRect(box)
        img_crop = cv2.getRectSubPix(blur, (right - left, bottom - top), ((left + right) / 2, (top + bottom) / 2))
        cv2.imwrite("right%d" % (left) + ".png", img_crop)
Example #14
def getCentralRect(img):
	y = img.shape[0]
	x = img.shape[1]

	center = (int(x/2),int(y/2))
	patchSize = (int(x/3),int(y/3))
	return cv2.getRectSubPix(img, patchSize, center)
Example #15
def processOneLineImage(gray_img, iTag):
    (_, img) = cv2.threshold(gray_img, 110, 255, cv2.THRESH_BINARY_INV)
    img = img[:, 2 : img.shape[1] - 2]
    scale = psegutils.estimate_scale(img)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    closed = cv2.dilate(img, kernel, iterations=1)
    edges = cv2.Canny(closed, 60, 300)
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(edges, contours, -1, (255, 255, 255), 1)
    # cv2.imwrite('edges%s.png' % iTag,edges)
    boxmap = psegutils.compute_boxmap(img, scale, threshold=(0.4, 10), dtype="B")
    # combineBoxmap(boxmap)
    cv2.imwrite("box%s.png" % iTag, boxmap * 255)
    h_projection = hprojection(boxmap * 255)
    top, bottom = cropProjection(h_projection)
    regions = splitProjection(h_projection, top, bottom, 30, 2)
    # print iTag, top,bottom
    # print regions
    # print v_projection[1270:1450]
    if len(iTag) == 0:
        return regions, top, bottom
    for region in regions:
        topStart, TopEnd = region
        cr_img = cv2.getRectSubPix(
            gray_img, (gray_img.shape[1] - 4, TopEnd - topStart + 8), (gray_img.shape[1] / 2, (TopEnd + topStart) / 2)
        )
        cv2.imwrite("%sx%d.png" % (iTag, topStart), cr_img)
    return regions, top, bottom
Example #16
    def __init__(self, frame, rect, number):
        self.num = number
        x1, y1, x2, y2 = rect
        w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
        x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
        self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
        self.size = w, h
        img = cv2.getRectSubPix(frame, (w, h), (x, y))

        self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
        g = np.zeros((h, w), np.float32)
        g[h//2, w//2] = 1
        g = cv2.GaussianBlur(g, (-1, -1), 2.0)
        g /= g.max()

        self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
        self.H1 = np.zeros_like(self.G)
        self.H2 = np.zeros_like(self.G)
        for i in xrange(128):
            a = self.preprocess(MOSSE.rnd_warp(img))
            A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
            self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
            self.H2 += cv2.mulSpectrums(     A, A, 0, conjB=True)
        self.update_kernel()
        self.update(frame)
Example #17
def cutTaxPayer(img):
    new_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    new_img = cv2.adaptiveThreshold(new_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 9, 9)
    contours0, hierarchy = cv2.findContours(new_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    H, W = img.shape[:2]
    result = np.zeros((H, W), np.uint8)
    for i in range(len(contours0) - 1, -1, -1):
        cnt = contours0[i]
        x, y, w, h = cv2.boundingRect(cnt)
        if w < 10 or h < 10:
            contours0.pop(i)
        else:
            cv2.rectangle(result, (x, y), (x + w, y + h), (255), 1)
            
    for h in range(H):
        for w in range(W):
            if  result[h][w] == 255:
                cv2.line(result, (0, h), (W, h), (255))
                break
    contours1, hierarchy1 = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # for cnt in contours1:
    #     x, y, w, h = cv2.boundingRect(cnt)  
    #     cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 1)
    #     print x, y
    # cv2.imshow("xxx1", img)
    if len(contours1) < 2:
        return np.ones((H / 4, W * 3 / 4, 3), np.uint8) * 255 
    x, y, w, h = cv2.boundingRect(contours1[-2])  
    center = (x + w / 2, y + h / 2)
    img = cv2.getRectSubPix(img, (w, h), center)
    return img
Example #18
def imagedistort(img):
    '''
    distort an image
        shift horizontally and vertically
        rotated clockwise and anticlockwise
    '''
    shift_range = 0.05
    rotate_range = 0.02

    h, w = img.shape
    size = max(w, h) *2
    normal = 255 * np.ones((size, size), np.uint8)
    normal[(size - h) / 2: (size + h) / 2, (size - w) / 2: (size + w) / 2] = img

    # rotate
    degree = 90 * random.uniform(-rotate_range, rotate_range)
    M = cv2.getRotationMatrix2D((size/2, size/2), degree, 1)
    rotated = cv2.warpAffine(normal, M, (size, size))

    # shift
    shift_value_x = size / 2 * random.uniform(-shift_range, shift_range)
    shift_value_y = size / 2 * random.uniform(-shift_range, shift_range)
    M = np.float32([[1, 0, shift_value_x], [0, 1, shift_value_y]])
    shift = cv2.warpAffine(rotated, M, (size, size))
    # crop
    center = (size / 2, size / 2)
    crop = cv2.getRectSubPix(shift, (w, h), center) 

    return crop
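A minimal usage sketch (the file name is hypothetical; the function expects a single-channel image, since it unpacks h, w = img.shape, and relies on Python 2 integer division in its slicing):

img = cv2.imread('digit.png', 0)  # hypothetical grayscale sample
distorted = imagedistort(img)     # randomly shifted and rotated copy
cv2.imwrite('digit_distorted.png', distorted)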
Example #19
def get_sub_image(image, x, y):
    dx = float(configs.SIZE-configs.LSTEP-configs.RSTEP) / 15
    dy = float(configs.SIZE-configs.BSTEP-configs.TSTEP) / 15
    xp = float(configs.LSTEP) + dx * x
    yp = float(configs.TSTEP) + dy * y

    return cv2.getRectSubPix(image, (int(dx + configs.PATCH_EXPAND), int(dy + configs.PATCH_EXPAND)), (int(xp + dx/2), int(yp + dy/2))) 
Example #20
    def __init__(self, frame, rect):
        x1, y1, x2, y2 = rect
        self.using_lk = False
        self.frameWidth = frame.shape[1]
        self.frameHeight = frame.shape[0]

        w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
        x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
        self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
        self.size = w, h
        self.org_size = w,h
        img = cv2.getRectSubPix(frame,  (w, h), (x, y))

        #self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
        g = np.zeros((h, w), np.float32)
        g[h//2, w//2] = 1
        g = cv2.GaussianBlur(g, (-1, -1), 2.0)
        g /= g.max()

        self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
        #print "init G",self.G.shape
        self.H1 = np.zeros_like(self.G)
        self.H2 = np.zeros_like(self.G)
        for i in xrange(128):
            a = self.preprocess(rnd_warp(img),self.size)
            A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
            self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
            self.H2 += cv2.mulSpectrums(     A, A, 0, conjB=True)

        #print "init imgF:",A.shape
        #print "init H1",self.H1.shape
        #print "init H2",self.H2.shape
        self.update_kernel()
Example #21
def get_card_texture(card, square=20):

    binary = get_binary(card, thresh=150)
    contours = find_contours(binary)

    if len(contours) < 2:
        return None

    contour = contours[1]

    # get bounding rectangle
    rect = cv2.boundingRect(contour)
    x, y, w, h = rect

    rect = cv2.getRectSubPix(card, (square, square), (x + w / 2, y + h / 2))

    gray_rect = cv2.cvtColor(rect, cv2.COLOR_RGB2GRAY)
    pixel_std = np.std(gray_rect)

    if pixel_std > 4.5:
        return sc.PROP_TEXTURE_STRIPED

    elif np.mean(gray_rect) > 150:
        return sc.PROP_TEXTURE_EMPTY

    else:
        return sc.PROP_TEXTURE_SOLID
Example #22
def get_subimage(image, first_anchor, second_anchor):
    (fax, fay) = first_anchor
    (sax, say) = second_anchor
    width = abs(fax - sax) + 1
    height = abs(fay - say) + 1
    center = ((fax + sax) / 2.0, (fay + say) / 2.0)
    subimage = cv2.getRectSubPix(image, (width, height), center)
    return subimage
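For example (anchor coordinates are illustrative), this crops the axis-aligned box spanned by two corner points, with getRectSubPix sampling at the fractional centre:

image = cv2.imread('board.png')                    # hypothetical input
patch = get_subimage(image, (10, 20), (109, 79))   # 100x60 axis-aligned crop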
Example #23
def get_cube_upright():
    # Uses the depth image to only take the part of the image corresponding to the closest point and a bit further
    global depth_img_avg
    global img_bgr8_clean
    closest_pnt = np.amin(depth_img_avg)
    # resize the depth image so it matches the color one
    depth_img_avg = cv2.resize(depth_img_avg, (1280, 960))
    # generate a mask with the closest points
    img_detection = np.where(depth_img_avg < closest_pnt + val_depth_capture, depth_img_avg, 0)
    # put all the pixels greater than 0 to 255
    ret, mask = cv2.threshold(img_detection, 0.0, 255, cv2.THRESH_BINARY)
    # convert to 8-bit
    mask = np.array(mask, dtype=np.uint8)
    im2, contours, hierarchy = cv2.findContours(mask, 1, 2, offset=(0, -6))
    useful_cnts = list()
    uprightrects = list()
    img_bgr8_clean_copy = img_bgr8_clean.copy()
    for cnt in contours:
        if 9000 < cv2.contourArea(cnt) < 15000:
            if 420 < cv2.arcLength(cnt, 1) < 560:
                useful_cnts.append(cnt)
            else:
                print("Wrong Lenght 450 < " + str(cv2.arcLength(cnt, 1)) + str(" < 570"))
        else:
            print ("Wrong Area: 9000 < " + str(cv2.contourArea(cnt)) + " < 15000")
    for index, cnts in enumerate(useful_cnts):
        min_area_rect = cv2.minAreaRect(cnts)  # minimum area rectangle that encloses the contour cnt
        (center, size, angle) = cv2.minAreaRect(cnts)
        width, height = size[0], size[1]
        if not (0.7*height < width < 1.3*height):
            print("Wrong Height/Width: " + str(0.7*height) + " < " + str(width) + " < " + str(1.3*height))
            continue
        points = cv2.boxPoints(min_area_rect)  # Find four vertices of rectangle from above rect
        points = np.int32(np.around(points))  # Round the values and make it integers
        cv2.drawContours(img_bgr8_clean_copy, [points], 0, (0, 0, 255), 2)
        cv2.drawContours(img_bgr8_clean_copy, cnts, -1, (255, 0, 255), 2)
        cv2.waitKey(1)
        # if we rotate more than 90 degrees, the width becomes height and vice-versa
        if angle < -45.0:
            angle += 90.0
            width, height = size[0], size[1]
            size = (height, width)
        rot_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
        # rotate the entire image around the center of the parking cell by the
        # angle of the rotated rect
        imgheight, imgwidth = img_bgr8_clean.shape[0], img_bgr8_clean.shape[1]
        rotated = cv2.warpAffine(img_bgr8_clean, rot_matrix, (imgwidth, imgheight), flags=cv2.INTER_CUBIC)
        # extract the rect after rotation has been done
        sizeint = (np.int32(size[0]), np.int32(size[1]))
        uprightrect = cv2.getRectSubPix(rotated, sizeint, center)
        uprightrects.append(uprightrect)
        uprightrect_copy = uprightrect.copy()
        cv2.drawContours(uprightrect_copy, [points], 0, (0, 0, 255), 2)
        cv2.imshow('uprightRect ' + str(index), uprightrect_copy)

    cv2.imshow('RBG', img_bgr8_clean_copy)
    cv2.waitKey(1)
    objects_detector(uprightrects)
Example #24
def extract_vectors_from_img(img,patch_size=7):
    img_size = img.shape
    offset = patch_size/2
    features = []
    for y in xrange(offset, img_size[0]-offset):
        for x in xrange(offset, img_size[1]-offset):
            patch = cv2.getRectSubPix(img, (patch_size, patch_size), (x,y))
            features.append(extract_vector_from_patch(patch))
    return features
Example #25
    def track(self, frame, pos=None):
        (x, y), (w, h) = self.pos, self.size
        if pos is not None:
            (x, y) = pos
        self.last_img = img = cv2.getRectSubPix(frame, (w, h), (x, y))
        img = self.preprocess(img)
        self.last_resp, (dx, dy), self.psr = self.correlate(img)
        self.good = self.psr > 8.0
        return np.array([dx, dy])
Example #26
def subimage(image, centre, theta, width, height):
    #output_image = np.zeros((height,width,3), np.uint8)
    #mapping = np.array([[np.cos(theta), -np.sin(theta), centre[0]],
    #                    [np.sin(theta), np.cos(theta), centre[1]]])
    mapping = cv2.getRotationMatrix2D(centre, theta, 1.0)
    #map_matrix_cv = cv2.fromarray(mapping)
    #cv.GetQuadrangleSubPix(image, output_image, map_matrix_cv)
    # dsize is (width, height); image.shape is (rows, cols), so swap
    image = cv2.warpAffine(image, mapping, (image.shape[1], image.shape[0]), flags=cv2.INTER_LINEAR)
    output_image = cv2.getRectSubPix(image, (width, height), centre)
    return output_image
Example #27
def deskew(image, angle):
    print angle
    image = cv2.bitwise_not(image)
    non_zero_pixels = cv2.findNonZero(image)
    center, wh, theta = cv2.minAreaRect(non_zero_pixels)

    root_mat = cv2.getRotationMatrix2D(center, angle, 1)
    rows,cols = image.shape[:2]
    rotated = cv2.warpAffine(image, root_mat, (cols, rows), flags=cv2.INTER_CUBIC)
    return cv2.bitwise_not(cv2.getRectSubPix(rotated, (cols, rows), center))
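A usage sketch; the skew angle is assumed to be estimated elsewhere, and the input should be dark text on a light background since the function inverts it twice:

img = cv2.imread('page.png', 0)  # hypothetical scanned page
straight = deskew(img, -3.5)     # hypothetical estimated skew of -3.5 degrees
cv2.imwrite('page_deskewed.png', straight)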
Example #28
def take_photo(argv):
    face_found = False
    
    #Which type? Hand or Face
    which_cam = 1
    name = "face"
    if argv:
        if argv[0] == "hand":
            which_cam = 0
            name = "hand"

    cascade = cv2.CascadeClassifier(cascade_fn)
    cam = create_capture(which_cam)
            
    while True:
        ret, img = cam.read()
        if name == "face" :
            # Do a little preprocessing:
            img_copy = cv2.resize(img, (img.shape[1]/2, img.shape[0]/2))
            gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            # Detect the faces (probably research for the options!):
            rects = cascade.detectMultiScale(gray)
            # Make a copy as we don't want to draw on the original image:
            for x, y, width, height in rects:
                if 475 < x < 775 : 
                    #cv2.rectangle(img_copy, (int(x-width*0.35), int(y-height/2)), (int(x+width*1.5), int(y+height*1.5)), (255,0,0), 2)
                    #Save face
                    cropped = cv2.getRectSubPix(img, (int(width*2)*2, int(height*2)*2), (int(x+width/2)*2, int(y+height/2)*2))
                    cv2.imwrite("static/captured/" + name + argv[1] + ".png", cropped)
                    face_found = True

            #cv2.imshow('facedetect', cropped)
            if cv2.waitKey(20) == 27:
                break
            if face_found:
                break
        else:
            #920 x 730 at 420, 60
            cropped = cv2.getRectSubPix(img, (920,730), (int(420+920/2), int(60+730/2)))
            cv2.imwrite("static/captured/" + name + argv[1] + ".png", cropped)
            break
Example #29
def get_neighbors(noisy, sz):
    """
    Get sz by sz neighbors and for each pixel value
    Note: If grayscale, this will be sz x sz, if RGB, will be sz x sz x 3
    Use grayscale
    """
    window = (sz, sz)
    neighbors = [cv2.getRectSubPix(noisy, window, (y, x)).ravel() \
        for x, y in itertools.product(range(noisy.shape[0]), range(noisy.shape[1]))]
    neighbors = np.asarray(neighbors)
    return (neighbors / 255.0).astype('float32')        
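A small usage sketch (the noise model is illustrative): each row of the result holds one pixel's flattened sz x sz neighbourhood, scaled to [0, 1]:

gray = cv2.imread('photo.png', 0)  # hypothetical grayscale input
noisy = np.clip(gray + np.random.normal(0, 10, gray.shape), 0, 255).astype('uint8')
X = get_neighbors(noisy, 5)        # shape (h * w, 25): one row per pixel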
Example #30
	def onmouse(event, x, y, flags, param):
		showImg[:,:,:] = img
		center = (x,y)
		pt1 = (x-50,y-50)
		pt2 = (x+50,y+50)
		cv2.rectangle(showImg, pt1, pt2, (0,0,255), thickness=2)
		patch = cv2.pyrUp(cv2.getRectSubPix(img, (100,100), center))
		canvas[0:200,0:200,:]=patch
		canvas[210:466,0:256,:]=getHistogram(patch)
		if flags & cv2.EVENT_FLAG_LBUTTON:
			history.append(canvas.copy())
Example #31
    def onmouse(event, x, y, flags, param):
        h, w = img.shape[:2]
        h1, w1 = small.shape[:2]
        x, y = 1.0 * x * h / h1, 1.0 * y * h / h1
        zoom = cv2.getRectSubPix(img, (800, 600), (x + 0.5, y + 0.5))
        cv2.imshow('zoom', zoom)
Example #32
            index.append(j)

for mask_no in list(set(range(len(validated_masklist))) - set(index)):
    final_maskist.append(validated_masklist[mask_no])

for mask in final_maskist:
    contour = np.argwhere(mask.transpose() == 255)
    rect = cv2.minAreaRect(contour)
    width = int(rect[1][0])
    height = int(rect[1][1])
    centre = (int(rect[0][0]), int(rect[0][1]))
    box = cv2.cv.BoxPoints(rect)
    box = np.int0(box)

    if width / float(height) > 1:
        cropped_image = cv2.getRectSubPix(image_mask, (width, height), centre)
    else:
        cropped_image = cv2.getRectSubPix(image_mask, (height, width), centre)

    cropped_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
    cropped_image = cv2.equalizeHist(cropped_image)
    cropped_image = cv2.resize(cropped_image, (260, 63))
    cropped_images.append(cropped_image)

#self trained Haar classifier with approx 130 positive images and 50 negative images
number_plate = cv2.CascadeClassifier('output.xml')

index = 0
max_area = 2000

for i in range(len(cropped_images)):
Example #33
        #print('O')
        #print(largest[2])

        for i in circles[0, :]:
            if i[2] > largest[2]:
                largest = i
        #cv2.circle(whiteMask, (largest[0], largest[1]), largest[2]-5, (0,0,0), -1)

        #for i in circles[0,:]:
        #   cv2.circle(whiteMask, (i[0], i[1]), i[2], (0,0,0), -1)

    #whiteMask = cv2.cvtColor(whiteMask,cv2.COLOR_BGR2GRAY)

    #masked = cv2.scaleAdd(whiteMask, 1, medianGray)

    last = cv2.getRectSubPix(medianGray, (3 * largest[2], 3 * largest[2]),
                             (largest[0], largest[1]))
    _, thr = cv2.threshold(last, 160, 255, cv2.THRESH_BINARY)

    Xcoord, Ycoord, Radius = largest[0], largest[1], largest[2] + 10
    H, W = thr.shape
    # x and y coordinates per every pixel of the image
    x, y = np.meshgrid(np.arange(W), np.arange(H))
    # squared distance from the center of the circle
    d2 = (x - Xcoord)**2 + (y - Ycoord)**2
    # mask is True inside of the circle
    mask = d2 < Radius**2

    draw_circle(frame, largest[2], largest[0], largest[1])
    cv2.imshow('frame', frame)

    outside = np.ma.masked_where(mask, thr)
Example #34
            width=int(rect[1][0])
            height=int(rect[1][1])
            centre=(int(rect[0][0]), int(rect[0][1]))
            size = (height, width)
            angle = int(rect[2])
            if angle < -45 :
                angle += 90
                size = (width,height)
        #     cv2.drawContours(imgtest,[rect],0,(0,0,255),2)
            rot = cv2.getRotationMatrix2D(centre,angle,1)
            dim = imgtest.shape
            height1 = imgtest.shape[0]
            width1 = imgtest.shape[1]
            # print dim
            orig_rect = cv2.warpAffine(imgtest,rot,(height1,width1))
            cropped_image = cv2.getRectSubPix(orig_rect,(height,width),centre)
            cropped_image=cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
            cropped_image=cv2.equalizeHist(cropped_image)
            cropped_image = cv2.resize(cropped_image,(260,63))
            orig_rects.append(cropped_image)
            # showfig(cropped_image,plt.get_cmap('gray'))
            images.append(imgtest)

        #     orig_rect = cv2.resize(orig_rect,(63,260))
        #     plt.imshow(orig_rect,plt.get_cmap('gray'))
        #     print size,centre,anglea
        # ppc = (8,8)
        # cpb = (3,3)
        final_images = []
        hog_image = []
        if (len(orig_rects)>0):
Example #35
    def update(self,current_frame,vis=False):
        self.frame_index+=1
        old_pos=(np.inf,np.inf)
        iter=1
        while iter<=self.refinement_iterations and np.any(np.array(old_pos)!=np.array(self._center)):
            patch = cv2.getRectSubPix(current_frame,(int(self.base_target_sz[0]*self.sc*(1+self.padding)),
                                                     int(self.base_target_sz[1]*self.sc*(1+self.padding))), self._center)
            patch=cv2.resize(patch,self.win_sz).astype(np.uint8)
            xo_hog,xo_cn= self.get_features(patch,self.cell_size)
            xo_cn2, xo_hog2 = self.feature_projection(xo_cn, xo_hog, self.projection_matrix_cn, self.projection_matrix_hog,
                                                    self._window)
            detect_k_cn=self.dense_gauss_kernel(self.z_cn2,xo_cn2,self.cn_sigma)
            detect_k_hog=self.dense_gauss_kernel(self.z_hog2,xo_hog2,self.hog_sigma)
            kf=fft2(self.d[0]*detect_k_cn+self.d[1]*detect_k_hog)
            responsef=self.alphaf*np.conj(kf)
            if self.interpolate_response>0:
                if self.interpolate_response==2:
                    self.interp_sz=(int(self.yf.shape[1]*self.cell_size*self.sc),
                               int(self.yf.shape[0]*self.cell_size*self.sc))
                else:
                    responsef=self.resize_dft2(responsef,self.interp_sz)
            response=np.real(ifft2(responsef))
            if vis is True:
                self.score = response
                self.score = np.roll(self.score, int(np.floor(self.score.shape[0] / 2)), axis=0)
                self.score = np.roll(self.score, int(np.floor(self.score.shape[1] / 2)), axis=1)
                self.crop_size=self.win_sz

            row,col=np.unravel_index(np.argmax(response, axis=None),response.shape)
            disp_row=np.mod(row+np.floor((self.interp_sz[1]-1)/2),self.interp_sz[1])-np.floor((self.interp_sz[1]-1)/2)
            disp_col=np.mod(col+np.floor((self.interp_sz[0]-1)/2),self.interp_sz[0])-np.floor((self.interp_sz[0]-1)/2)
            if self.interpolate_response==0:
                translation_vec=list(np.array([disp_row,disp_col])*self.cell_size*self.sc)
            elif self.interpolate_response==1:
                translation_vec=list(np.array([disp_row,disp_col])*self.sc)
            elif self.interpolate_response==2:
                translation_vec=[disp_row,disp_col]
            trans=np.sqrt(self.win_sz[0]*self.win_sz[1])*self.sc/3
            old_pos=self._center
            self._center=(old_pos[0]+translation_vec[1],old_pos[1]+translation_vec[0])
            iter+=1

        self.sc = self.scale_estimator.update(current_frame, self._center, self.base_target_sz,
                                                                self.sc)
        if self.scale_type == 'normal':
            self.sc = np.clip(self.sc, a_min=self._min_scale_factor,
                                                a_max=self._max_scale_factor)

        patch = cv2.getRectSubPix(current_frame, (int(self.base_target_sz[0] * self.sc * (1 + self.padding)),
                                                  int(self.base_target_sz[1] * self.sc * (1 + self.padding))),
                                  self._center)
        patch = cv2.resize(patch, self.win_sz).astype(np.uint8)
        xo_hog,xo_cn=self.get_features(patch,self.cell_size)
        self.z_hog=(1-self.lr_hog)*self.z_hog+self.lr_hog*xo_hog
        self.z_cn=(1-self.lr_cn)*self.z_cn+self.lr_cn*xo_cn

        data_matrix_cn = self.z_cn.reshape((-1, self.z_cn.shape[2]))
        pca_basis_cn, _, _ = np.linalg.svd(data_matrix_cn.T.dot(data_matrix_cn))
        self.projection_matrix_cn = pca_basis_cn[:, :self.num_compressed_dim_cn]

        data_matrix_hog = self.z_hog.reshape((-1, self.z_hog.shape[2]))
        pca_basis_hog, _, _ = np.linalg.svd(data_matrix_hog.T.dot(data_matrix_hog))
        self.projection_matrix_hog = pca_basis_hog[:, :self.num_compressed_dim_hog]

        self.z_cn2, self.z_hog2 = self.feature_projection(self.z_cn, self.z_hog, self.projection_matrix_cn, self.projection_matrix_hog,
                                                  self._window)
        if self.frame_index%self.modnum==0:
            self.train_model()
        target_sz=((self.base_target_sz[0]*self.sc),(self.base_target_sz[1]*self.sc))
        return [(self._center[0] - target_sz[0] / 2), (self._center[1] - target_sz[1] / 2), target_sz[0],target_sz[1]]
Example #36
    def ExtractTile(self):

        height, width, depth = self.tile.shape

        # Contour on Frame Boundary
        img = np.zeros((height, width, 3), np.uint8)
        cv2.rectangle(img, (0, 0), (width, height), (0, 0, 255), 2)
        contour_image = cv2.cvtColor(img, cv2.cv.CV_BGR2GRAY)
        contours_frame, hierarchy = cv2.findContours(
            contour_image, cv2.cv.CV_RETR_EXTERNAL,
            cv2.cv.CV_CHAIN_APPROX_SIMPLE)
        #cv2.drawContours(img, contours_frame, -1, (0, 255, 0), 3 )
        #cv2.imshow("Contour_Frame",img)

        #Contour on Tiles
        grey_image = cv2.cvtColor(self.tile, cv2.cv.CV_BGR2GRAY)
        thresh = cv2.threshold(grey_image, 0, 255,
                               cv2.cv.CV_THRESH_BINARY + cv2.cv.CV_THRESH_OTSU)
        canny_image = cv2.Canny(grey_image, thresh[0], thresh[0] * 3)
        contours, hierarchy = cv2.findContours(canny_image,
                                               cv2.cv.CV_RETR_EXTERNAL,
                                               cv2.cv.CV_CHAIN_APPROX_NONE)
        length = len(contours)  # check for tile-less frames
        if length:
            # Find the index of the largest contour
            areas = [cv2.contourArea(c) for c in contours]
            #areas = [cv2.arcLength(c,True) for c in contours]
            max_index = np.argmax(areas)
            #cv2.drawContours(self.tile,contours[max_index], -1, (255, 0, 0), 3 )
            #cv2.imshow("Contour_Approx", self.tile)
            # To find indices of rotated rectangle
            bound_points = cv2.minAreaRect(contours[max_index])
            #print (type(bound_points)) -->((c0,c1),(w,h),theta)
            #print bound_points[0][0],bound_points[0][1],bound_points[1][0],bound_points[1][1]
            #print bound_points[2]
            c0 = bound_points[0][0]
            c1 = bound_points[0][1]
            w = bound_points[1][0]
            h = bound_points[1][1]
            theta = (bound_points[2])

            # Tile Boundness - In Frame - Test
            self.boundnessflag = 1
            self.areaflag = 1
            # Test For maxtile area

            if (self.firstframe == True):
                self.MaxAreaTile = areas[max_index]
                self.firstframe = 0
            else:
                if (areas[max_index] < self.MaxAreaTile):
                    self.areaflag = 0
                else:
                    self.areaflag = 1
                    self.MaxAreaTile = areas[max_index]

            # Test Using boundingRect
            x_br, y_br, w_br, h_br = cv2.boundingRect(contours[max_index])
            #print x_br,y_br,w_br,h_br
            ret_val_1 = cv2.pointPolygonTest(contours_frame[0], (x_br, y_br),
                                             False)
            ret_val_2 = cv2.pointPolygonTest(contours_frame[0],
                                             (x_br, (y_br + w_br)), False)
            ret_val_3 = cv2.pointPolygonTest(contours_frame[0],
                                             ((x_br + h_br), y_br), False)
            ret_val_4 = cv2.pointPolygonTest(contours_frame[0],
                                             ((x_br + h_br), (y_br + w_br)),
                                             False)
            #print ret_val_1,ret_val_2,ret_val_3,ret_val_4
            if ((ret_val_1 == 0) or (ret_val_2 == 0) or (ret_val_3 == 0)
                    or (ret_val_4 == 0)):
                self.boundnessflag = 0

            #cv2.rectangle(tile,(x_br,y_br),(x_br+w_br,y_br+h_br),(255,0,0),2)
            #cv2.imshow("Boundrect", tile)
            # Test Using approxPolyDP
            """
          approx = cv2.approxPolyDP(contours[max_index],0.1*cv2.arcLength(contours[max_index],True),True)
          #cv2.drawContours(tile, approx, -1, (255, 0, 0), 3 )
          cv2.imshow("Contour_Approx", tile)
          out = range(len(approx))              
          print len(approx),type(approx),approx
          for i in range(len(approx)):
             a,b =  approx[i][0]
             print a , b
             out[i] = cv2.pointPolygonTest(contours_frame[0],(a,b) , False)
             print out[i]
             if (out[i] == False):
                flag = 0
                break
          """
            # Create output image
            #if(c0 and c1 and w and h):
            if (self.boundnessflag and self.areaflag):
                rot_mat = cv2.getRotationMatrix2D(
                    (bound_points[0][0], bound_points[0][1]), theta, 1)
                #print(type(rot_mat),rot_mat)
                rotated = cv2.warpAffine(self.tile, rot_mat, (width, height))
                #print rotated
                self.final = cv2.getRectSubPix(rotated, ((int)(w), (int)(h)),
                                               (c0, c1))
                self.outflag = 1
                #cv2.imshow("Final", self.final)
            else:
                self.outflag = 0
Example #37
def load_train_valid_images(image_files,
                            train_index,
                            batch_index,
                            batch_size,
                            rotationAngle,
                            cropImageSize,
                            finalImageSize,
                            channel,
                            substract_each_image_mean=False,
                            augmentation_method="on_the_fly"):
    """Since the whole training image set contains 61578 images, and is too large to fit in 
    the memory, we use this function to load a mini-batch images at a time, and use that
    mini-batch to train the CNN. Note that this 
    
    :type image_files: list
    :param image_files: contains all the file names of the whole training images
    NOTE: image_files contains both training set and validation set
    
    :type train_index: array
    :param train_index: contains all the indices for the training set
    
    :type batch_index: int
    :param batch_index: the index of this batch
    
    :type batch_size: int
    :param batch_size: size of each mini-batch
    
    """

    # the index of this mini-batch
    train_batch_index = train_index[batch_index *
                                    batch_size:(batch_index + 1) * batch_size]
    train_image_files = [image_files[i] for i in train_batch_index]

    images_train = np.zeros((batch_size, channel * (finalImageSize**2)),
                            dtype=theano.config.floatX)
    for i, imgf in enumerate(train_image_files):
        # read JPEG image, see:
        # http://docs.opencv.org/modules/highgui/doc/
        # reading_and_writing_images_and_video.html?highlight=imread#imread
        #
        # flags>0 for 3-channel color image
        # flags=0 for grayscale image
        flags = (0 if channel == 1 else 3)
        if augmentation_method == "pre_computed":
            s = imgf.split("/")
            f, n = s[0], s[1]
            imgf = f + "/angle{}".format(np.int32(rotationAngle)) + "/" + n
            imgf = imgf[:-4] + "_rotationAngle{}_cropImageSize{}.jpg".format(
                rotationAngle, cropImageSize)
            finalImage = cv2.imread(imgf, flags)

        elif augmentation_method == "on_the_fly":
            originalImage = cv2.imread(imgf, flags)
            # use only the central part of size patchSize x patchSize
            height = originalImage.shape[0]
            width = originalImage.shape[1]
            center = (width / 2.0, height / 2.0)  # OpenCV centers are (x, y)
            # about geometric transform using opencv-python, see
            # http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/
            # py_imgproc/py_geometric_transformations/py_geometric_transformations.html
            #image_counter = 0
            #for angle in rotationAngles:
            # we do not scale it, so we set scale = 1
            rotationMatrix = cv2.getRotationMatrix2D(center,
                                                     rotationAngle,
                                                     scale=1)
            # keep the output image with the same size as the input
            rotatedImage = cv2.warpAffine(originalImage, rotationMatrix,
                                          (width, height))
            # see,
            # http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#warpaffine
            #print center_coord
            # crop the central part of size patchSize
            #for cropSize in cropImageSizes:
            croppedImage = cv2.getRectSubPix(rotatedImage,
                                             (cropImageSize, cropImageSize),
                                             center)
            # resize the central part to size imageSize
            finalImage = cv2.resize(croppedImage,
                                    (finalImageSize, finalImageSize))

        # whether we substract mean of each image (and each channel for channel = 3)
        if (substract_each_image_mean == True):
            meanImage = np.mean(np.mean(finalImage, axis=0), axis=0)
            finalImage -= meanImage
        # swap axes, so dim = (imageSize, imageSize, 3) becomes dim = (3, imageSize, imageSize)
        if (channel == 3):
            finalImage = np.swapaxes(np.swapaxes(finalImage, 1, 2), 0, 1)
        # reshape it into 1-D rasterized image
        finalImage = np.reshape(finalImage, channel * (finalImageSize**2))
        images_train[i] = finalImage

    return (images_train)
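A hypothetical invocation (all values are illustrative; image_files and train_index come from the surrounding training script):

batch = load_train_valid_images(image_files, train_index, batch_index=0,
                                batch_size=128, rotationAngle=45.0,
                                cropImageSize=207, finalImageSize=64,
                                channel=3)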
Example #38
        heightNew = int(width * fabs(sin(radians(degree))) +
                        height * fabs(cos(radians(degree))))
        widthNew = int(height * fabs(sin(radians(degree))) +
                       width * fabs(cos(radians(degree))))

        matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree,
                                              1)

        matRotation[0, 2] += (widthNew - width) / 2    # the key step: shift the translation so the rotated image stays inside the enlarged canvas
        matRotation[1, 2] += (heightNew - height) / 2  # the key step, as above

        img = cv2.warpAffine(img,
                             matRotation, (widthNew, heightNew),
                             borderValue=(255, 255, 255))

    # crop image (Size patchSize: size of the rectangle to extract; Point2f center: its position in the source image)
    # target_img = cv2.getRectSubPix(imgRotation, (900, 1000), (550, 1100))  # go straight
    # img = cv2.getRectSubPix(img, (1080, 1200), (500, 1000))  # go straight(wanghaidong)
    img = cv2.getRectSubPix(
        img, (sub_imgage_width, sub_image_height),
        (sub_image_center_x, sub_image_center_y))  # go straight(wuyiqiang)

    if not os.path.exists(crop_dest_image_path):
        os.makedirs(crop_dest_image_path)
        #  /home/dong/PycharmProjects/traffic-gesture-recognition/datasets/my/go_straight_full_image
        #  /home/dong/PycharmProjects/traffic-gesture-recognition/datasets/my/park_right/yangluxing
    cv2.imwrite(os.path.join(crop_dest_image_path, file), img)

    # cv2.imshow('img', target_img)
    # cv2.waitKey()
Example #39
    def get_vein_img(self, save_vein_pic=True, save_bb=True):
        crop = []
        for sample in range(0, self.total_input):

            # Error removing for augmented data---------------------
            file, point, point_pred = str(
                self.img_name[sample]
            ), self.output[sample], self.target[sample]
            if ((file.find('_flrot_') != -1) |
                (file.find('_flrotVera_') != -1)):
                point1 = np.array(point[0:2])
                point2 = np.array(point[2:4])
                point_changed = []
                point_changed.append(point2)
                point_changed.append(point1)
                self.output[sample] = np.array(point_changed).reshape((1, 4))

                point1 = np.array(point_pred[0:2])
                point2 = np.array(point_pred[2:4])
                point_changed = []
                point_changed.append(point2)
                point_changed.append(point1)
                self.target[sample] = np.array(point_changed).reshape((1, 4))
            # -------------------------------------------------------

            top_left = self.output[sample, 0:2]
            top_right = self.output[sample, 2:4]

            # Find the angle to rotate the image
            angle = (180 / np.pi) * (np.arctan(
                (top_left[1] - top_right[1]) / (top_left[0] - top_right[0])))

            # Rotate the image to cut rectangle from the images
            points_pred = (self.output[sample]).reshape((1, 2, 2))
            points_test = (self.target[sample]).reshape((1, 2, 2))
            img = cv2.imread(self.data_folder + self.img_name[sample])
            # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            image = []
            image.append(img)
            image = np.array(image)
            image_rotated, keypoints_pred_rotated = iaa.Affine(rotate=-angle)(
                images=image, keypoints=points_pred)
            _, keypoints_test_rotated = iaa.Affine(rotate=-angle)(
                images=image, keypoints=points_test)

            # Check if the image is fully rotated that left goes to the right side of hand
            if (keypoints_pred_rotated[0, 0, 0] > keypoints_pred_rotated[0, 1,
                                                                         0]):
                # Again rotate the picture to 180 with the points
                image = image_rotated
                image_rotated, keypoints_pred_rotated = iaa.Affine(rotate=180)(
                    images=image, keypoints=keypoints_pred_rotated)
                _, keypoints_test_rotated = iaa.Affine(rotate=180)(
                    images=image, keypoints=keypoints_test_rotated)

            image_rotated = image_rotated[0]
            keypoints_pred_rotated = keypoints_pred_rotated.reshape((2, 2))
            keypoints_test_rotated = keypoints_test_rotated.reshape((2, 2))

            # Rotated Points
            top_left = keypoints_pred_rotated[0]
            top_left[0] = top_left[0] - self.th
            top_right = keypoints_pred_rotated[1]
            top_right[0] = top_right[0] + self.th
            self.width = int(abs(top_right - top_left)[0])
            self.height = int(self.width * (90 / 80))
            centre = tuple([
                top_left[0] + int(self.width / 2),
                top_left[1] + int(self.height / 2)
            ])

            # Crop the Vein Image
            cropped = cv2.getRectSubPix(image_rotated,
                                        (self.width, self.height), centre)
            crop.append(cropped)
            if (save_vein_pic):
                cv2.imwrite(self.cropped_fldr + self.img_name[sample], cropped)

            # Draw Predicted Troughs
            points = keypoints_pred_rotated.reshape((2, 2))
            color = [(255, 255, 255),
                     (0, 0, 0)]  # Left - White, # Right - Black
            count = 0
            for point in points:
                point = np.array(point).astype(int)
                cv2.circle(image_rotated, (point[0], point[1]), 5,
                           color[count], -1)
                count += 1

            # Draw Actual Troughs
            points = keypoints_test_rotated.reshape((2, 2))
            for point in points:
                point = np.array(point).astype(int)
                cv2.circle(image_rotated, (point[0], point[1]), 5, (255, 0, 0),
                           -1)

            bottom_right = [
                int(top_left[0] + self.width),
                int(top_left[1] + self.height)
            ]

            # Draw Bounding Boxes and Save the image
            image_rotated = cv2.rectangle(image_rotated, tuple(top_left),
                                          tuple(bottom_right), (0, 0, 0), 2)
            if (save_bb):
                cv2.imwrite(self.bounding_box_folder + self.img_name[sample],
                            image_rotated)
        crop = np.array(crop)
        return crop
Example #40
    triangle_hypotenus = np.linalg.norm(
        np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -
        np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']]))

    angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))

    rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx, plate_cy),
                                              angle=angle,
                                              scale=1.0)

    img_rotated = cv2.warpAffine(img_thresh,
                                 M=rotation_matrix,
                                 dsize=(width, height))

    img_cropped = cv2.getRectSubPix(img_rotated,
                                    patchSize=(int(plate_width),
                                               int(plate_height)),
                                    center=(int(plate_cx), int(plate_cy)))

    ratio = img_cropped.shape[1] / img_cropped.shape[0]
    if ratio < MIN_PLATE_RATIO or ratio > MAX_PLATE_RATIO:
        continue

    plate_imgs.append(img_cropped)
    plate_infos.append({
        'x': int(plate_cx - plate_width / 2),
        'y': int(plate_cy - plate_height / 2),
        'w': int(plate_width),
        'h': int(plate_height)
    })
Example #41
    def add_new_cube(self, image):
        """"This function takes care of adding a new colorcube to the range of detection cubes."""
        # Try to add the area specified by the most recent click.

        # Determine the area to use.
        # This is a square area of self.cube_size pixels.
        area_to_be_added = cv2.getRectSubPix(
            image, (self.cube_size, self.cube_size),
            (float(self.add_x), float(self.add_y)))

        # Determine the min and max values in that area.
        # Split the image
        (hue, saturation, value) = cv2.split(area_to_be_added)
        # Determine the min and max in the area.
        # These min and max are increased/decreased to improve detection.
        hue_max = numpy.amin([180, numpy.amax(hue) + self.increase_h])
        hue_min = numpy.amax([0, numpy.amin(hue) - self.increase_h])
        print hue
        if (hue_max - hue_min) > 100:
            # OK, probably it is some kind of red color (around 0),
            # so we need to determine the min and max around 0.
            # TODO: make this more efficient using numpy or so
            hue_min = 180
            hue_max = 0
            for hue_val_line in hue:
                for hue_val in hue_val_line:
                    print hue_val
                    if hue_val > 90:
                        if hue_val < hue_min:
                            hue_min = hue_val
                    if hue_val <= 90:
                        if hue_val > hue_max:
                            hue_max = hue_val

        saturation_max = numpy.amin(
            [255, numpy.amax(saturation) + self.increase_s])
        saturation_min = numpy.amax(
            [0, numpy.amin(saturation) - self.increase_s])

        value_max = numpy.amin([255, numpy.amax(value) + self.increase_v])
        value_min = numpy.amax([0, numpy.amin(value) - self.increase_v])

        # Add these to a dict.
        new_cube = {
            'h_lower': hue_min,
            'h_upper': hue_max,
            's_lower': saturation_min,
            's_upper': saturation_max,
            'v_lower': value_min,
            'v_upper': value_max
        }

        print "Made new dict."
        print new_cube

        # Add the dict to the detection dicts.
        ((self.to_be_detected[self.current_target])[1]).append(
            copy.deepcopy(new_cube))

        # And it's done!
        self.add_from_next_image = False
Пример #42
0
INIT_LR = 1e-3
BS = 32
# initialize the data and labels

print("[INFO] loading images...")
data = []
labels = []
# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images(args["dataset"])))
random.seed(42)
random.shuffle(imagePaths)
# loop over the input images
for imagePath in imagePaths:
    # load the image, pre-process it, and store it in the data list
    image = cv2.imread(imagePath)
    image = cv2.getRectSubPix(image, (320, 240), (150, 150))
    thresh = 136
    image = cv2.threshold(image, thresh, 255, cv2.THRESH_BINARY)[1]
    image = cv2.resize(image, (28, 28))
    image = img_to_array(image)
    data.append(image)
    # extract the class label from the image path and update the label list
    label = imagePath.split(os.path.sep)[-2]

    if label == "no":
        label = 0
    elif label == "first":
        label = 1
    elif label == "second":
        label = 2
    elif label == "third":
Пример #43
0
    def init(self, first_frame, bbox):
        bbox = np.array(bbox).astype(np.int64)
        x0, y0, w, h = tuple(bbox)
        self.target_sz = (w, h)
        self._center = (int(x0 + w / 2), int(y0 + h / 2))
        if w * h > self.translation_model_max_area:
            self.sc = np.sqrt(w * h / self.translation_model_max_area)
        else:
            self.sc = 1.
        self.base_target_sz = (w / self.sc, h / self.sc)
        self.win_sz = (int(np.floor(self.base_target_sz[0] * (1 + self.padding))),
                       int(np.floor(self.base_target_sz[1] * (1 + self.padding))))

        output_sigma = np.sqrt(self.base_target_sz[0] * self.base_target_sz[1]) * self.output_sigma_factor / self.cell_size
        use_sz = (int(np.floor(self.win_sz[0] / self.cell_size)),
                  int(np.floor(self.win_sz[1] / self.cell_size)))

        self.yf = fft2(0.5 * gaussian2d_rolled_labels(use_sz, sigma=output_sigma))
        self.interp_sz = (use_sz[0] * self.cell_size, use_sz[1] * self.cell_size)
        self._window = cos_window(use_sz)

        if self.scale_type == 'normal':
            self.scale_estimator = DSSTScaleEstimator(self.target_sz, config=self.scale_config)
            self.scale_estimator.init(first_frame, self._center, self.base_target_sz, self.sc)
            self._num_scales = self.scale_estimator.num_scales
            self._scale_step = self.scale_estimator.scale_step

            self._min_scale_factor = self._scale_step ** np.ceil(
                np.log(np.max(5 / np.array([self.win_sz[0], self.win_sz[1]]))) / np.log(self._scale_step))
            self._max_scale_factor = self._scale_step ** np.floor(np.log(np.min(
                first_frame.shape[:2] / np.array([self.base_target_sz[1], self.base_target_sz[0]]))) / np.log(
                self._scale_step))
        elif self.scale_type == 'LP':
            self.scale_estimator = LPScaleEstimator(self.target_sz, config=self.scale_config)
            self.scale_estimator.init(first_frame, self._center, self.base_target_sz, self.sc)

        self.cn_sigma = self.cn_sigma_color
        self.hog_sigma = self.hog_sigma_color
        self.lr_hog = self.lr_hog_color
        self.lr_cn = self.lr_cn_color
        self.modnum = self.gap
        self.is_gray = False

        patch = cv2.getRectSubPix(first_frame, self.win_sz, self._center).astype(np.uint8)
        self.z_hog, self.z_cn = self.get_features(patch, cell_size=self.cell_size)

        data_matrix_cn = self.z_cn.reshape((-1, self.z_cn.shape[2]))
        pca_basis_cn, _, _ = np.linalg.svd(data_matrix_cn.T.dot(data_matrix_cn))
        self.projection_matrix_cn = pca_basis_cn[:, :self.num_compressed_dim_cn]

        data_matrix_hog = self.z_hog.reshape((-1, self.z_hog.shape[2]))
        pca_basis_hog, _, _ = np.linalg.svd(data_matrix_hog.T.dot(data_matrix_hog))
        self.projection_matrix_hog = pca_basis_hog[:, :self.num_compressed_dim_hog]

        self.z_cn2, self.z_hog2 = self.feature_projection(self.z_cn, self.z_hog,
                                                          self.projection_matrix_cn,
                                                          self.projection_matrix_hog,
                                                          self._window)
        self.frame_index = 1
        self.d = self.train_model()
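A note on the two SVD calls above: Z.T.dot(Z) is the d×d Gram matrix of the feature channels, so its singular vectors form a PCA-style basis, and keeping only the leading num_compressed_dim columns compresses the HOG and Color-Names channels before the filter is trained. This appears to follow the adaptive channel compression used by CN/fDSST-style trackers; that attribution is a reading of the code, not stated in the original.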
Пример #44
0
def showresultrect(img_rotated, dsize, center):
    img_crop = cv2.getRectSubPix(img_rotated, (dsize[0], dsize[1]), center)
    img_test = cv2.resize(img_crop, (136, 36))
    return img_test, img_crop
Пример #45
0
fDistToWx = interp1d(distance_arr, x_interval)
fDistToWz = interp1d(distance_arr, fWxToWz(x_interval))

aspect = height / width
output_width = 500
output_height = int(aspect * output_width)

print('Begin Unwarping')

output_image = np.zeros((output_height, output_width, 3))
for oy in range(len(output_image)):
    for ox in range(len(output_image[0])):
        d = (ox / float(output_width)) * width
        v = (oy / float(output_height)) * height

        wx = fDistToWx(d)
        wy = v
        wz = fDistToWz(d)

        ix, iy = world_to_source(wx, wy, wz)

        image_x = (image_width / 2) - ix
        image_y = (image_height / 2) - iy

        pixel = cv2.getRectSubPix(image, (1, 1), (image_x, image_y))
        output_image[output_height - oy - 1,
                     output_width - ox - 1] = pixel[0, 0] / 255.0

cv2.imwrite('./test_focal_length/' + str(focal) + '.png', output_image * 255.0)
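Sampling one pixel at a time with a (1, 1) getRectSubPix patch gives correct bilinear interpolation but is very slow inside a Python double loop. If the source coordinates for every output pixel can be precomputed, cv2.remap performs the same bilinear lookup in one vectorized call; a sketch under that assumption (the map arrays are hypothetical):

import cv2
import numpy as np

def unwarp_with_remap(image, src_x, src_y):
    # src_x, src_y: float32 arrays of shape (out_h, out_w) holding, for each
    # output pixel, the source coordinates the per-pixel loop would have
    # passed to cv2.getRectSubPix.
    return cv2.remap(image,
                     src_x.astype(np.float32), src_y.astype(np.float32),
                     interpolation=cv2.INTER_LINEAR)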
Пример #46
0
                cv.circle(blobs, (x, y), int(radius), color, 5)
                cv.circle(blobs, (x, y), 1, color, 5)
                draw_str(blobs, (x, y), "%d" % i)

            cv.imshow('blobs', blobs)
            cv.imshow('index_map', index_map)

            s = 4
            patchsize = (s, s)

            # triangle center
            # TODO: get from tracker
            px = 600 * f
            py = 200 * f
            center = (px, py)
            zoom = cv.getRectSubPix(hsv1, patchsize, center)
            show('zoom', zoom)

            # t = 64
            t = sz // s
            # t*t = total patches

            i = read_curr_index
            ci = ((i % (t * t)) // t) * s
            cj = (i % t) * s
            # zoom[:,:,1] = 255  # s
            # zoom[:,:,2] = 255  # v

            rgb_zoom = cv.cvtColor(zoom, cv.COLOR_HSV2BGR)

            img[ci:ci + s, cj:cj + s, :] = rgb_zoom
Пример #47
0
def stitch(img1, img2, H, inv_H):
    print('----Stitching Images ------- ')
    # Find StitchedImage size
    # print('Image1 dimensions', img1.shape)

    # Four Points of Image1
    h1, w1, d1 = img1.shape
    img1points1 = [[0, 0]]
    img1points2 = [[w1, 0]]
    img1points3 = [[0, h1]]
    img1points4 = [[w1, h1]]

    # points1 = np.array([img1points1, img1points2, img1points3, img1points4], np.float32)

    # print('Points1 Image', points1)

    # Four points of Image2
    h2, w2, d2 = img2.shape
    x, y = project(0, 0, inv_H)
    x1, y1 = project(w2, 0, inv_H)
    x2, y2 = project(0, h2, inv_H)
    x3, y3 = project(w2, h2, inv_H)

    img2points1 = [[x, y]]
    img2points2 = [[x1, y1]]
    img2points3 = [[x2, y2]]
    img2points4 = [[x3, y3]]

    points = np.array(
        [img1points1, img1points2, img1points3, img1points4, img2points1, img2points2, img2points3, img2points4],
        np.float32)

    boundaryPoints = cv2.boundingRect(points)  # returns (x, y, w, h)
    w = boundaryPoints[2]
    h = boundaryPoints[3]
    stichedImage = np.zeros([h, w, 3], np.uint8)

    # cv2.imshow('Stitched Window', stichedImage)
    # cv2.waitKey()

    print('Image 1 dimensions', img1.shape, 'Image2 dimensions', img2.shape)
    print('Image 1 boundary points', img1points1, img1points2, img1points3, img1points4)
    print('Image 2 boundary', (0, 0), (w2, 0), (0, h2), (w2, h2))
    print('Projected Point', img2points1, img2points2, img2points3, img2points4)
    print('Boundary Points', boundaryPoints)
    print('Stitched Image', stichedImage.shape)

    # Copy Image 1 to stichedImage
    # stichedImage[0:img1.shape[0], 0:img1.shape[1]] = img1
    for y in range(0, img1.shape[0]):
        for x in range(0, img1.shape[1]):
            stichedImage[y - boundaryPoints[1], x - boundaryPoints[0]] = img1[y, x]


    # Project the stitched image into image2's space
    '''
    1. Project each pixel of the stitched image into image2's space.
    2. If the point lies within image2's boundaries, copy that pixel into stichedImage.
    '''

    for y in range(boundaryPoints[1], stichedImage.shape[0]):
        for x in range(boundaryPoints[0], stichedImage.shape[1]):
            x1, y1 = project(x, y, H[0])
            # print('Projected Points', x1, y1)
            if (0 <= x1 < img2.shape[1]) and (0 <= y1 < img2.shape[0]):
                pixelValueImg2 = cv2.getRectSubPix(img2, (1, 1), (x1, y1))
                if (y - boundaryPoints[1] < stichedImage.shape[0]) and (x - boundaryPoints[0] < stichedImage.shape[1]):
                    stichedImage[y - boundaryPoints[1], x - boundaryPoints[0]] = pixelValueImg2[0][0]


    print('-------Exiting the Stitched Image -----------')
    return stichedImage
Пример #48
0
def extractPlate(imgOriginal, listOfMatchingChars):
    possiblePlate = PossiblePlate.PossiblePlate(
    )  # this will be the return value

    listOfMatchingChars.sort(
        key=lambda matchingChar: matchingChar.intCenterX
    )  # sort chars from left to right based on x position

    # calculate the center point of the plate
    fltPlateCenterX = (
        listOfMatchingChars[0].intCenterX +
        listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterX) / 2.0
    fltPlateCenterY = (
        listOfMatchingChars[0].intCenterY +
        listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY) / 2.0

    ptPlateCenter = fltPlateCenterX, fltPlateCenterY

    # calculate plate width and height
    intPlateWidth = int(
        (listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectX +
         listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectWidth
         - listOfMatchingChars[0].intBoundingRectX) *
        PLATE_WIDTH_PADDING_FACTOR)

    intTotalOfCharHeights = 0

    for matchingChar in listOfMatchingChars:
        intTotalOfCharHeights = intTotalOfCharHeights + matchingChar.intBoundingRectHeight
    # end for

    fltAverageCharHeight = intTotalOfCharHeights / len(listOfMatchingChars)

    intPlateHeight = int(fltAverageCharHeight * PLATE_HEIGHT_PADDING_FACTOR)

    # calculate correction angle of plate region
    fltOpposite = listOfMatchingChars[
        len(listOfMatchingChars) -
        1].intCenterY - listOfMatchingChars[0].intCenterY
    fltHypotenuse = DetectChars.distanceBetweenChars(
        listOfMatchingChars[0],
        listOfMatchingChars[len(listOfMatchingChars) - 1])
    fltCorrectionAngleInRad = math.asin(fltOpposite / fltHypotenuse)
    fltCorrectionAngleInDeg = fltCorrectionAngleInRad * (180.0 / math.pi)

    # pack plate region center point, width and height, and correction angle into rotated rect member variable of plate
    possiblePlate.rrLocationOfPlateInScene = (tuple(ptPlateCenter),
                                              (intPlateWidth, intPlateHeight),
                                              fltCorrectionAngleInDeg)

    # final steps are to perform the actual rotation

    # get the rotation matrix for our calculated correction angle
    rotationMatrix = cv2.getRotationMatrix2D(tuple(ptPlateCenter),
                                             fltCorrectionAngleInDeg, 1.0)

    height, width, numChannels = imgOriginal.shape  # unpack original image width and height

    imgRotated = cv2.warpAffine(imgOriginal, rotationMatrix,
                                (width, height))  # rotate the entire image

    imgCropped = cv2.getRectSubPix(imgRotated, (intPlateWidth, intPlateHeight),
                                   tuple(ptPlateCenter))

    possiblePlate.imgPlate = imgCropped  # copy the cropped plate image into the applicable member variable of the possible plate

    return possiblePlate
Пример #49
0
def cropImage(image, rectangle):
    x, y = rectangle[0], rectangle[1]
    h, w = rectangle[2], rectangle[3]
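    # NB: despite the names, h is passed as the patch *width* and w as the
    # *height* below; getRectSubPix takes patchSize in (width, height) order.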
    return cv2.getRectSubPix(image, (int(h), int(w)), (x + h / 2, y + w / 2))
Пример #50
0
    # pack plate region center point, width and height, and correction angle into rotated rect member variable of plate
    possiblePlate.rrLocationOfPlateInScene = (tuple(plateCenter),
                                              (plateWidth, plateHeight),
                                              correctionAngleInDeg)

    # get the rotation matrix for our calculated correction angle
    rotationMatrix = cv2.getRotationMatrix2D(tuple(plateCenter),
                                             correctionAngleInDeg, 1.0)

    height, width, numChannels = img.shape

    # rotate the entire image
    imgRotated = cv2.warpAffine(img, rotationMatrix, (width, height))

    # crop the image/plate detected
    imgCropped = cv2.getRectSubPix(imgRotated, (plateWidth, plateHeight),
                                   tuple(plateCenter))

    # copy the cropped plate image into the applicable member variable of the possible plate
    possiblePlate.Plate = imgCropped

    # populate plates_list with the detected plate
    if possiblePlate.Plate is not None:
        plates_list.append(possiblePlate)

    # draw a ROI on the original image
    for i in range(0, len(plates_list)):
        # finds the four vertices of a rotated rect - it is useful to draw the rectangle.
        p2fRectPoints = cv2.boxPoints(plates_list[i].rrLocationOfPlateInScene)

        # roi rectangle colour
        rectColour = (0, 255, 0)
Пример #51
0
def extract_patch_sz(img, sz, xy):
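    # Assumption: input_sz is a module-level constant (the network input size)
    # defined elsewhere in the original file.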
    sub = cv2.getRectSubPix(img, (int(round(sz)), int(round(sz))), xy)
    res = cv2.resize(sub, (input_sz, input_sz))
    return res
Пример #52
0
cv2.imwrite('./results/result_phase_1.png', canvas)

relighted = face_relighting.relight(image, is_bgr=True)
cv2.imwrite('./results/result_phase_2.png', relighted)

mask = removeBG_API_request.remove_background(relighted,
                                              image_format='.jpg',
                                              is_RGB=False)[:, :, 3]
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
cv2.imwrite('./results/result_phase_3.png', mask)

h_div_w = 1.231
new_w = w
new_h = int(w * h_div_w)
output = cv2.getRectSubPix(relighted, (new_w, new_h), (w / 2, h / 2))
mask = cv2.getRectSubPix(mask, (new_w, new_h), (w / 2, h / 2))

white = np.ones_like(output) * 255

result = (white * (1 - (mask / 255.)) + (mask / 255.) * output).astype('uint8')

cv2.imwrite('./results/result_last_phase.png', result)

fig, axs = plt.subplots(3, 3)
fig.suptitle('Diego Bonilla auto-photo-ID')

axs[0, 0].imshow(white)
axs[0, 0].axis('off')
axs[2, 0].imshow(white)
axs[2, 0].axis('off')
Пример #53
0
def get_subpixel(img, y, x):
    patch = cv2.getRectSubPix(img, (1, 1), (x, y))
    return patch[0][0]
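Because the patch is 1×1, get_subpixel returns the bilinear interpolation of the four pixels surrounding (x, y). A quick illustration reusing the helper above, with values chosen purely for demonstration:

import cv2
import numpy as np

img = np.array([[0., 100.],
                [100., 200.]], dtype=np.float32)
# Halfway between all four pixels, the result is their average:
# (0 + 100 + 100 + 200) / 4 = 100
print(get_subpixel(img, 0.5, 0.5))  # 100.0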
Пример #54
0
    def get_license_plate_char(self, image):
        # Read image
        img_ori = image

        if type(image) == str:
            if platform.system().lower() == 'windows' and image[0] == '~':
                image = os.environ['USERPROFILE'] + image[1:]
            image = os.path.abspath(image)
            img_ori = cv2.imread(image)

        if type(img_ori) is not np.ndarray:
            raise ValueError('ERROR: invalid image!')

        height, width, channel = img_ori.shape

        # Convert image to grayscale
        gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

        # Maximize contrast
        structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT,
                                     structuringElement)
        imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT,
                                       structuringElement)
        imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
        gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

        # Thresholding
        img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)
        usingAdaptive = True
        if usingAdaptive:
            img_thresh = cv2.adaptiveThreshold(
                img_blurred,
                maxValue=255.0,
                adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                thresholdType=cv2.THRESH_BINARY_INV,
                blockSize=19,
                C=12)
        else:
            _, img_thresh = cv2.threshold(img_blurred,
                                          thresh=0,
                                          maxval=255,
                                          type=cv2.THRESH_BINARY_INV
                                          | cv2.THRESH_OTSU)

        # Find contours
        contours, _ = cv2.findContours(img_thresh,
                                       mode=cv2.RETR_LIST,
                                       method=cv2.CHAIN_APPROX_SIMPLE)

        # Prepare data
        contours_dict = []
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            # insert into dict
            contours_dict.append({
                'contour': contour,
                'x': x,
                'y': y,
                'w': w,
                'h': h,
                'cx': x + (w / 2),
                'cy': y + (h / 2)
            })

        # Select candidates by char size
        MIN_AREA = 80
        MIN_WIDTH, MIN_HEIGHT = 2, 8
        MIN_RATIO, MAX_RATIO = 0.25, 1.0

        possible_contours = []
        cnt = 0
        for d in contours_dict:
            area = d['w'] * d['h']
            ratio = d['w'] / d['h']
            if area > MIN_AREA \
            and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT \
            and MIN_RATIO < ratio < MAX_RATIO:
                d['idx'] = cnt
                cnt += 1
                possible_contours.append(d)

        # Select candidates by arrangement of contours
        MAX_DIAG_MULTIPLYER = 5  # 5
        MAX_ANGLE_DIFF = 12.0  # 12.0
        MAX_AREA_DIFF = 0.5  # 0.5
        MAX_WIDTH_DIFF = 0.8  # 0.8
        MAX_HEIGHT_DIFF = 0.2  # 0.2
        MIN_N_MATCHED = 5  # 3

        def find_chars(contour_list):
            matched_result_idx = []
            for d1 in contour_list:
                matched_contours_idx = []
                for d2 in contour_list:
                    if d1['idx'] == d2['idx']:
                        continue
                    dx = abs(d1['cx'] - d2['cx'])
                    dy = abs(d1['cy'] - d2['cy'])
                    diagonal_length = np.sqrt(d1['w']**2 + d1['h']**2)
                    distance = np.linalg.norm(
                        np.array((d1['cx'], d1['cy'])) -
                        np.array((d2['cx'], d2['cy'])))
                    if dx == 0:
                        angle_diff = 90
                    else:
                        angle_diff = np.degrees(np.arctan(dy / dx))
                    area_diff = abs(d1['w'] * d1['h'] -
                                    d2['w'] * d2['h']) / (d1['w'] * d1['h'])
                    width_diff = abs(d1['w'] - d2['w']) / d1['w']
                    height_diff = abs(d1['h'] - d2['h']) / d1['h']
                    if distance < diagonal_length * MAX_DIAG_MULTIPLYER \
                    and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
                    and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
                        matched_contours_idx.append(d2['idx'])
                # append this contour
                matched_contours_idx.append(d1['idx'])

                if len(matched_contours_idx) < MIN_N_MATCHED:
                    continue

                matched_result_idx.append(matched_contours_idx)

                unmatched_contour_idx = []
                for d4 in contour_list:
                    if d4['idx'] not in matched_contours_idx:
                        unmatched_contour_idx.append(d4['idx'])
                unmatched_contour = np.take(possible_contours,
                                            unmatched_contour_idx)

                # recursive
                recursive_contour_list = find_chars(unmatched_contour)
                for idx in recursive_contour_list:
                    matched_result_idx.append(idx)

                break

            # optimizing
            ret = []
            for idx_list in matched_result_idx:
                matched_contour = np.take(possible_contours, idx_list)
                sorted_contour = sorted(matched_contour, key=lambda x: x['x'])
                matched = []
                for i in range(len(sorted_contour) - 1):
                    d1 = sorted_contour[i]
                    d2 = sorted_contour[i + 1]
                    if len(matched) == 0:
                        matched.append(d1['idx'])
                    diagonal_length = np.sqrt(d1['w']**2 + d1['h']**2)
                    distance = np.linalg.norm(
                        np.array((d1['cx'], d1['cy'])) -
                        np.array((d2['cx'], d2['cy'])))
                    if distance > diagonal_length * 3:
                        sorted_contour = sorted_contour[:i + 1]
                        break
                    matched.append(d2['idx'])
                if len(matched) > 0:
                    ret.append(matched)

            # return matched_result_idx
            return ret

        result_idx = find_chars(possible_contours)

        matched_result = []
        for idx_list in result_idx:
            matched_result.append(np.take(possible_contours, idx_list))

        # Rotate plate image
        PLATE_WIDTH_PADDING = 1.1
        # PLATE_HEIGHT_PADDING = 1.1
        MIN_PLATE_RATIO = 3
        MAX_PLATE_RATIO = 12

        plate_imgs = []
        plate_infos = []
        for matched_chars in matched_result:
            sorted_chars = sorted(matched_chars, key=lambda x: x['x'])
            plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2
            plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2
            plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] -
                           sorted_chars[0]['x']) * PLATE_WIDTH_PADDING
            sum_height = 0
            for d in sorted_chars:
                sum_height += d['h']
            plate_height = int(sum_height / len(sorted_chars) *
                               PLATE_WIDTH_PADDING)

            triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy']
            triangle_hypotenus = np.linalg.norm(
                np.array((sorted_chars[0]['cx'], sorted_chars[0]['cy'])) -
                np.array((sorted_chars[-1]['cx'], sorted_chars[-1]['cy'])))
            angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))
            rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx,
                                                              plate_cy),
                                                      angle=angle,
                                                      scale=1.0)

            img_rotated = cv2.warpAffine(img_thresh,
                                         M=rotation_matrix,
                                         dsize=(width, height))
            img_cropped = cv2.getRectSubPix(img_rotated,
                                            patchSize=(int(plate_width),
                                                       int(plate_height)),
                                            center=(int(plate_cx),
                                                    int(plate_cy)))

            ratio = img_cropped.shape[1] / img_cropped.shape[0]
            if ratio < MIN_PLATE_RATIO or ratio > MAX_PLATE_RATIO:
                continue

            plate_imgs.append(img_cropped)
            plate_infos.append({
                'x': int(plate_cx - plate_width / 2),
                'y': int(plate_cy - plate_height / 2),
                'w': int(plate_width),
                'h': int(plate_height)
            })

        # Another thresholding to find chars
        longest_idx, longest_text = -1, 0
        plate_chars = []

        for i, plate_img in enumerate(plate_imgs):
            plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
            _, plate_img = cv2.threshold(plate_img,
                                         thresh=0.0,
                                         maxval=255.0,
                                         type=cv2.THRESH_BINARY
                                         | cv2.THRESH_OTSU)

            # find contours again (same as above)
            contours, _ = cv2.findContours(plate_img,
                                           mode=cv2.RETR_LIST,
                                           method=cv2.CHAIN_APPROX_SIMPLE)
            plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]
            plate_max_x, plate_max_y = 0, 0

            for contour in contours:
                x, y, w, h = cv2.boundingRect(contour)
                area = w * h
                ratio = w / h
                if area > MIN_AREA and w > MIN_WIDTH and h > MIN_HEIGHT and MIN_RATIO < ratio < MAX_RATIO:
                    if x < plate_min_x:
                        plate_min_x = x
                    if y < plate_min_y:
                        plate_min_y = y
                    if x + w > plate_max_x:
                        plate_max_x = x + w
                    if y + h > plate_max_y:
                        plate_max_y = y + h

            img_result = plate_img[plate_min_y:plate_max_y,
                                   plate_min_x:plate_max_x]
            img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0)
            _, img_result = cv2.threshold(img_result,
                                          thresh=0.0,
                                          maxval=255.0,
                                          type=cv2.THRESH_BINARY
                                          | cv2.THRESH_OTSU)

            # dilation
            kernel = np.ones((2, 2), np.uint8)
            img_result = cv2.dilate(img_result, kernel=kernel)

            img_result = cv2.copyMakeBorder(img_result,
                                            top=10,
                                            bottom=10,
                                            left=10,
                                            right=10,
                                            borderType=cv2.BORDER_CONSTANT,
                                            value=(0, 0, 0))

            chars = pytesseract.image_to_string(img_result,
                                                lang='kor',
                                                config='--psm 7 --oem 0')

            result_chars = ''
            has_digit = False
            for c in chars:
                if ord('가') <= ord(c) <= ord('힣') or c.isdigit():
                    if c.isdigit():
                        has_digit = True
                    result_chars += c

            plate_chars.append(result_chars)
            if has_digit and len(result_chars) > longest_text:
                longest_idx = i
                longest_text = len(result_chars)

        # Result
        if len(plate_chars) == 0:
            gc.collect()
            return None
        # info = plate_infos[longest_idx]
        chars = plate_chars[longest_idx]

        # print(chars)
        gc.collect()
        return chars
Пример #55
0
        isSignal = isSignal - 1

    # Subtract the previous frame (t_minus) from the blurred current frame (t_plus).
    #t_minus = cv2.GaussianBlur(t_minus, (5,5), 0)
    t_plus = cv2.GaussianBlur(t_plus, (5, 5), 0)
    mask = cv2.absdiff(t_minus, t_plus)

    # Compare the resulting difference against a threshold value
    retval, mask = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)

    # Apply morphological closing and opening to get rid of
    # small moving regions (camera noise)
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
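    # Crop a width x width window around point1 and count the moving
    # (non-zero) pixels inside it.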
    mask_min = cv2.getRectSubPix(
        mask, (width, width), (point1[0] + width / 2, point1[1] + width / 2))
    countNonZero = cv2.countNonZero(mask_min)

    if intervalId > 0:
        intervalId = intervalId - 1
    else:
        intervalId = 1000
        print('countNonZero =', countNonZero)

    # If motion is detected
    if countNonZero > countLimit:
        print('motion detected, countNonZero =', countNonZero)

        # Save the frame to a file
        if saveToFile:
            params = list()
Пример #56
0
    def detect(self, image, threshold=0.7):
        match_method = cv2.TM_CCOEFF_NORMED
        detected = {}
        num_objects = 0
            
        with Timer('detection'):
            # Compare squares against patterns, and if not recognized, take the sample as new pattern
            for i, cnt in enumerate(self.squares):
                max_found = threshold
                max_item = None
                max_location = (-1,-1)
                max_angle = -1
                bounds = cv2.boundingRect(cnt)
                location_abs, size, phi = cv2.minAreaRect(cnt)
                cropped = cv2.getRectSubPix(self.processed, bounds[2:], location_abs)
                location = (location_abs[0]-bounds[0], location_abs[1]-bounds[1])
                w, h = tuple(int(c) for c in size)
                rot = cv2.getRotationMatrix2D(location, phi, 1)
                rotated = cv2.warpAffine(cropped, rot, dsize=(cropped.shape[1], cropped.shape[0]), flags=cv2.INTER_CUBIC)  # dsize is (width, height)
                sample = cv2.getRectSubPix(rotated, (w-4, h-4), location)
                #sample = cv2.cvtColor(sample, cv2.COLOR_BGR2GRAY)
                sample = cv2.resize(sample, (90, 90))
                #ret, sample = cv2.threshold(sample, 230, 255, cv2.THRESH_BINARY)
                #sample = cv2.equalizeHist(sample)
            
                #cv2.circle(self.original, tuple(int(l) for l in location_abs), 20, (0,0,255))
                #cv2.imshow('crop'+str(i),self.processed)
            
                for tag, pattern in self.patterns.items():
                    match = cv2.matchTemplate(sample, pattern, match_method)
                    result, _, _, _  = cv2.minMaxLoc(match)
                    if result > max_found:
                        #print "Best candidate for %s is %s with %s" % (str(i),tag, str(result))
                        max_found = result
                        max_item = tag
                        max_location = (int(location_abs[0]), int(location_abs[1]))
                        max_angle = phi
                        if result > threshold * 1.5:
                            break
                else:
                    # print("Discarded", str(i))
                    if self.training:
                        # print("Store new pattern", i)
                        cv2.imwrite('patterns/tag-%s.pgm' % str(i), sample)

                if max_item:
                    cv2.drawContours(self.original, self.squares, i, (0, 255, 0), 5)
                    tag_id = max_item.replace("patterns/tag-", "").replace(".pgm", "")
                    max_angle = (360 - max_angle) % 360
                    try:
                        name, angle = tag_id.split("-", 1)
                        #if name=='arrow':
                        #    print "Orig angle", max_angle, "using", angle
                        if angle=='t1':
                            max_angle += 90
                        elif angle=='t2':
                            max_angle += 180
                        elif angle=='t3':
                            max_angle += 270
                    except:
                        name = tag_id
                    cv2.putText(self.original, "%s" % name,
                        (max_location[0]-20, max_location[1]-20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
                    #vmod = 10
                    #p2 = (int(max_location[0] + vmod * np.cos(max_angle + np.pi/4)),
                    #     int(max_location[1] + vmod * np.sin(max_angle + np.pi/4)))
                    #cv2.line(self.original, max_location, p2, (0,255,255), 3)
                    #print "Chosen %s to be %s" % (str(i), max_item)
                    if name not in detected:
                        detected[name] = []
                    detected[name].append({'angle': int(100 * max_angle) / 100.0, 'pos': max_location})
                    num_objects += 1
        return num_objects, detected
Пример #57
0
                                               cv2.CHAIN_APPROX_NONE)

        contours_after_size_verification = []
        for contour in contours:
            if verifySize(height_img, contour):
                contours_after_size_verification.append(contour)

        for contour in contours_after_size_verification:

            # bounding rect
            x, y, w, h = cv2.boundingRect(contour)
            paddingw = int(w / 3)
            paddingh = int(h / 4)
            # img_crop used to reference a copy of the input, and the file written out was img_crop
            img_crop = cv2.getRectSubPix(img_contours,
                                         (w + paddingw, h + paddingh),
                                         (x + w / 2, y + h / 2))
            resized_image = cv2.resize(img_crop, (34, 85))

            if "image" not in trace:
                # file_name = trace[:-6]+'_'+str(numImg)+'.png'
                file_name = trace[:-4] + '_' + str(numImg) + '.png'
            elif "image" in trace:
                # file_name = 'lp_' + trace[:-4] + '_' + str(numImg) + '.png'
                file_name = trace[:-4] + '_' + str(numImg) + '.png'
            else:
                print("image not being segmented: ", trace)
                break

            cv2.imwrite("{}/{}".format(PATH_WRITE, file_name), resized_image)
Пример #58
0
    def CarPlate(self):
        global InNum, OutNum, CarNum, InTime, TotalTime, InTimeDB, TotalFee
        plt.style.use('dark_background')
        ############ Load the car image
        img_car = cv2.imread('car.jpg')
        height, width, channel = img_car.shape  # image size

        ############ Convert to grayscale
        gray = cv2.cvtColor(img_car, cv2.COLOR_BGR2GRAY)

        ############ Maximize contrast
        structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

        imgTopHat = cv2.morphologyEx(
            gray, cv2.MORPH_TOPHAT, structuringElement)  # difference between opening and the original image
        imgBlackHat = cv2.morphologyEx(
            gray, cv2.MORPH_BLACKHAT,
            structuringElement)  # difference between closing and the original image

        imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
        gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

        ########### Noise removal: blur > threshold
        img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)

        img_thresh = cv2.adaptiveThreshold(
            img_blurred,
            maxValue=255.0,
            adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            thresholdType=cv2.THRESH_BINARY_INV,
            blockSize=19,
            C=9)

        # Contour detection (temp_result)
        contours, _ = cv2.findContours(img_thresh,
                                       mode=cv2.RETR_LIST,
                                       method=cv2.CHAIN_APPROX_SIMPLE)

        ##### Draw rectangles enclosing the contours
        temp_result = np.zeros((height, width, channel), dtype=np.uint8)
        contours_dict = []

        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(temp_result,
                          pt1=(x, y),
                          pt2=(x + w, y + h),
                          color=(255, 255, 255),
                          thickness=2)

            # insert into dict
            contours_dict.append({
                'contour': contour,
                'x': x,
                'y': y,
                'w': w,
                'h': h,
                'cx': x + (w / 2),
                'cy': y + (h / 2)
            })

        # Find license plate candidates
        MIN_AREA = 80
        MIN_WIDTH, MIN_HEIGHT = 2, 8
        MIN_RATIO, MAX_RATIO = 0.25, 1.0

        possible_contours = []

        cnt = 0
        for d in contours_dict:
            area = d['w'] * d['h']
            ratio = d['w'] / d['h']

            if area > MIN_AREA \
                    and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT \
                    and MIN_RATIO < ratio < MAX_RATIO:
                d['idx'] = cnt
                cnt += 1
                possible_contours.append(d)

        # Select the final candidates by the arrangement of the contours
        MAX_DIAG_MULTIPLYER = 5  # spacing between characters
        MAX_ANGLE_DIFF = 12.0  # angle between characters
        MAX_AREA_DIFF = 0.5  # area difference between characters
        MAX_WIDTH_DIFF = 0.8  # width difference
        MAX_HEIGHT_DIFF = 0.2  # height difference
        MIN_N_MATCHED = 3  # minimum number that satisfies the five conditions above

        # Search recursively
        def find_chars(contour_list):
            matched_result_idx = []  # stores the indices of the candidates that finally remain

            for d1 in contour_list:
                matched_contours_idx = []
                for d2 in contour_list:
                    if d1['idx'] == d2['idx']:
                        continue

                    dx = abs(d1['cx'] - d2['cx'])
                    dy = abs(d1['cy'] - d2['cy'])
                    diagonal_length1 = np.sqrt(d1['w']**2 + d1['h']**2)
                    # distance between the centers of the two contours
                    distance = np.linalg.norm(
                        np.array([d1['cx'], d1['cy']]) -
                        np.array([d2['cx'], d2['cy']]))
                    if dx == 0:
                        angle_diff = 90  ## angle between the contours
                    else:
                        angle_diff = np.degrees(np.arctan(dy / dx))

                    area_diff = abs(d1['w'] * d1['h'] -
                                    d2['w'] * d2['h']) / (d1['w'] * d1['h'])
                    width_diff = abs(d1['w'] - d2['w']) / d1['w']
                    height_diff = abs(d1['h'] - d2['h']) / d1['h']

                    if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER \
                            and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
                            and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
                        matched_contours_idx.append(d2['idx'])

                # append this contour
                matched_contours_idx.append(d1['idx'])

                if len(matched_contours_idx) < MIN_N_MATCHED:
                    continue

                matched_result_idx.append(matched_contours_idx)

                unmatched_contour_idx = []
                for d4 in contour_list:
                    if d4['idx'] not in matched_contours_idx:
                        unmatched_contour_idx.append(d4['idx'])

                unmatched_contour = np.take(
                    possible_contours, unmatched_contour_idx)  # take only the entries with those indices

                # recursive
                recursive_contour_list = find_chars(unmatched_contour)

                for idx in recursive_contour_list:
                    matched_result_idx.append(idx)

                break

            return matched_result_idx

        result_idx = find_chars(possible_contours)

        matched_result = []
        for idx_list in result_idx:
            matched_result.append(np.take(possible_contours, idx_list))

        # Rotate the tilted plate image
        PLATE_WIDTH_PADDING = 1.3  # 1.3
        PLATE_HEIGHT_PADDING = 1.5  # 1.5
        MIN_PLATE_RATIO = 3
        MAX_PLATE_RATIO = 10

        plate_imgs = []
        plate_infos = []

        for i, matched_chars in enumerate(matched_result):
            sorted_chars = sorted(matched_chars, key=lambda x: x['cx'])

            plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2
            plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2

            plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] -
                           sorted_chars[0]['x']) * PLATE_WIDTH_PADDING

            sum_height = 0
            for d in sorted_chars:
                sum_height += d['h']

            plate_height = int(sum_height / len(sorted_chars) *
                               PLATE_HEIGHT_PADDING)

            triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy']
            triangle_hypotenus = np.linalg.norm(
                np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -
                np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']]))

            angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))

            rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx,
                                                              plate_cy),
                                                      angle=angle,
                                                      scale=1.0)

            img_rotated = cv2.warpAffine(img_thresh,
                                         M=rotation_matrix,
                                         dsize=(width, height))

            img_cropped = cv2.getRectSubPix(img_rotated,
                                            patchSize=(int(plate_width),
                                                       int(plate_height)),
                                            center=(int(plate_cx),
                                                    int(plate_cy)))

            ratio = img_cropped.shape[1] / img_cropped.shape[0]
            if ratio < MIN_PLATE_RATIO or ratio > MAX_PLATE_RATIO:
                continue

            plate_imgs.append(img_cropped)
            plate_infos.append({
                'x': int(plate_cx - plate_width / 2),
                'y': int(plate_cy - plate_height / 2),
                'w': int(plate_width),
                'h': int(plate_height)
            })

        #### Threshold once more
        longest_idx, longest_text = -1, 0
        plate_chars = []

        for i, plate_img in enumerate(plate_imgs):
            plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
            _, plate_img = cv2.threshold(plate_img,
                                         thresh=0.0,
                                         maxval=255.0,
                                         type=cv2.THRESH_BINARY
                                         | cv2.THRESH_OTSU)

            contours, _ = cv2.findContours(plate_img,
                                           mode=cv2.RETR_LIST,
                                           method=cv2.CHAIN_APPROX_SIMPLE)

            plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]
            plate_max_x, plate_max_y = 0, 0

            for contour in contours:
                x, y, w, h = cv2.boundingRect(contour)

                area = w * h
                ratio = w / h
                if area > MIN_AREA \
                        and w > MIN_WIDTH and h > MIN_HEIGHT \
                        and MIN_RATIO < ratio < MAX_RATIO:
                    if x < plate_min_x:
                        plate_min_x = x
                    if y < plate_min_y:
                        plate_min_y = y
                    if x + w > plate_max_x:
                        plate_max_x = x + w
                    if y + h > plate_max_y:
                        plate_max_y = y + h

            img_result = plate_img[plate_min_y:plate_max_y,
                                   plate_min_x:plate_max_x]
            img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0)
            _, img_result = cv2.threshold(img_result,
                                          thresh=0.0,
                                          maxval=255.0,
                                          type=cv2.THRESH_BINARY
                                          | cv2.THRESH_OTSU)
            img_result = cv2.copyMakeBorder(img_result,
                                            top=10,
                                            bottom=10,
                                            left=10,
                                            right=10,
                                            borderType=cv2.BORDER_CONSTANT,
                                            value=(0, 0, 0))

            #### Character recognition
            text = pytesseract.image_to_string(img_result,
                                               lang='kor',
                                               config='--psm 7 --oem 0')
            result_chars = ''
            has_digit = False  ## whether the string contains a digit

            for c in text:
                if ord('가') <= ord(c) <= ord('힣') or c.isdigit():
                    if c.isdigit():
                        has_digit = True
                    result_chars += c

            plate_chars.append(result_chars)

            if has_digit and len(result_chars) > longest_text:
                longest_idx = i
                longest_text = len(result_chars)

        info = plate_infos[longest_idx]
        CarNum = plate_chars[longest_idx]

        print("차량 번호 ", CarNum)
        img_out = img_car.copy()

        cv2.rectangle(img_out,
                      pt1=(info['x'], info['y']),
                      pt2=(info['x'] + info['w'], info['y'] + info['h']),
                      color=(255, 0, 0),
                      thickness=2)

        CarNumber = plt.figure(figsize=(8, 6))
        plt.imshow(img_out)

        plt.show()
        reply = QMessageBox.question(self, 'Confirm vehicle number',
                                     'Is the vehicle number %s correct?' % CarNum,
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            plt.close()
            cv2.destroyWindow('Car Video')

        else:
            exit()
Пример #59
0
    def get_im(self, src_im, rect=(0, 0, 100, 100)):
        return cv2.getRectSubPix(src_im,
                                 (rect[2] - rect[0], rect[3] - rect[1]),
                                 ((rect[2] + rect[0]) / 2,
                                  (rect[3] + rect[1]) / 2))
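Here rect is evidently an (x1, y1, x2, y2) box: the patch size is the coordinate difference and the center is the box midpoint, again in the (width, height) order that getRectSubPix expects.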
Пример #60
0
import cv2
import os
import glob

img_dir = "C:/users/hp/image/"  # Enter Directory of all images
data_path = os.path.join(img_dir, '*g')
files = glob.glob(data_path)

for f1 in files:
    img = cv2.imread(f1)
    img = cv2.getRectSubPix(img, (320, 220), (150, 170))
    cv2.imwrite(f1, img)
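One property every snippet here relies on implicitly: when the requested patch extends past the image border, getRectSubPix extrapolates the missing pixels by border replication rather than raising an error (only the center itself must lie inside the image). A small illustrative check:

import cv2
import numpy as np

img = np.arange(9, dtype=np.float32).reshape(3, 3)
# Centered on the top-left pixel, most of the 3x3 patch falls outside the
# image; missing values are filled by replicating the nearest edge pixels.
patch = cv2.getRectSubPix(img, (3, 3), (0, 0))
print(patch)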